code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( __snake_case ):
_UpperCamelCase : str = ["image_processor", "tokenizer"]
_UpperCamelCase : Union[str, Any] = "AutoImageProcessor"
_UpperCamelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Union[str, Any] = self.image_processor
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
_lowercase : Dict = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if images is not None:
_lowercase : Union[str, Any] = self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and images is not None:
_lowercase : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __a ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __a ( self ):
return ["input_ids", "attention_mask", "pixel_values"]
| 66 |
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(snake_case ) )
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case ) -> bool:
# Base Case
if index == len(snake_case ):
return True
# Recursive Step
for i in range(snake_case ):
if valid_coloring(graph[index] , snake_case , snake_case ):
# Color current vertex
__lowercase = i
# Validate coloring
if util_color(snake_case , snake_case , snake_case , index + 1 ):
return True
# Backtrack
__lowercase = -1
return False
def SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> list[int]:
__lowercase = [-1] * len(snake_case )
if util_color(snake_case , snake_case , snake_case , 0 ):
return colored_vertices
return []
| 375 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase__ ( lowercase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = KandinskyImgaImgPipeline
__UpperCAmelCase : int = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
__UpperCAmelCase : List[str] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
__UpperCAmelCase : List[str] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__UpperCAmelCase : str = False
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return 32
@property
def __lowercase ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def __lowercase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return 100
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : str = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Union[str, Any] = MCLIPConfig(
numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1005 ,)
_a : Tuple = MultilingualCLIP(_a )
_a : str = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Optional[int] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_a : int = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
_a : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self : str ):
'''simple docstring'''
_a : Optional[Any] = self.dummy_text_encoder
_a : int = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : List[Any] = {
'num_train_timesteps': 1000,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
_a : Any = DDIMScheduler(**_a )
_a : Dict = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowercase ( self : Dict ,_a : Dict ,_a : int=0 ):
'''simple docstring'''
_a : Optional[Any] = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_a ) ).to(_a )
_a : Dict = floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Union[str, Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
_a : List[str] = image.cpu().permute(0 ,2 ,3 ,1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('RGB' ).resize((256, 256) )
if str(_a ).startswith('mps' ):
_a : str = torch.manual_seed(_a )
else:
_a : Union[str, Any] = torch.Generator(device=_a ).manual_seed(_a )
_a : List[str] = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
_a : Union[str, Any] = 'cpu'
_a : Union[str, Any] = self.get_dummy_components()
_a : str = self.pipeline_class(**_a )
_a : List[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Optional[int] = pipe(**self.get_dummy_inputs(_a ) )
_a : Any = output.images
_a : Optional[int] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_a : List[str] = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
_a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_a : Optional[Any] = 'A red cartoon frog, 4k'
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : List[Any] = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' ,torch_dtype=torch.floataa )
_a : str = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_a : Dict = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
_a : Union[str, Any] = pipeline(
_a ,image=_a ,image_embeds=_a ,negative_image_embeds=_a ,generator=_a ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type='np' ,)
_a : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a ,_a )
| 702 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : Any ,*_a : Optional[Any] ,**_a : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' ,_a ,)
super().__init__(*_a ,**_a )
| 319 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase = 50_000
lowerCamelCase = 5_000
lowerCamelCase , lowerCamelCase = os.path.split(__file__)
lowerCamelCase = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i]
@get_duration
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = dataset[i : i + batch_size]
def a__ ( ):
UpperCAmelCase_ = {"num examples": SPEED_TEST_N_EXAMPLES}
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted, {"type": "pandas", "length": SMALL_TEST}),
(read_formatted, {"type": "torch", "length": SMALL_TEST}),
(read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
UpperCAmelCase_ = [
(read, {"length": SMALL_TEST}),
(read, {"length": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
(read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
(read_formatted, {"type": "numpy", "length": SMALL_TEST}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
(read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("generating dataset" )
UpperCAmelCase_ = datasets.Features(
{"list": datasets.Sequence(datasets.Value("float32" ) ), "numbers": datasets.Value("float32" )} )
UpperCAmelCase_ = generate_example_dataset(
os.path.join(lowerCAmelCase__ , "dataset.arrow" ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={"list": (100,)} , )
print("first set of iterations" )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print("shuffling dataset" )
UpperCAmelCase_ = dataset.shuffle()
print("Second set of iterations (after shuffling" )
for func, kwargs in functions_shuffled:
print("shuffled " , func.__name__ , str(lowerCAmelCase__ ) )
UpperCAmelCase_ = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , "wb" ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 82 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def lowercase_ ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 94 | 0 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
# TODO Update this
SCREAMING_SNAKE_CASE__ : Tuple = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case ( UpperCamelCase_ ):
lowercase_ = 'esm'
def __init__( self : int , a_ : Dict=None , a_ : Any=None , a_ : List[Any]=None , a_ : Dict=768 , a_ : int=12 , a_ : Optional[Any]=12 , a_ : Tuple=3072 , a_ : Dict=0.1 , a_ : Any=0.1 , a_ : List[str]=1026 , a_ : List[Any]=0.02 , a_ : Optional[Any]=1e-1_2 , a_ : int="absolute" , a_ : List[str]=True , a_ : List[Any]=None , a_ : str=False , a_ : Any=False , a_ : List[str]=None , a_ : str=None , **a_ : List[str] , )-> List[str]:
"""simple docstring"""
super().__init__(pad_token_id=a_ , mask_token_id=a_ , **a_ )
SCREAMING_SNAKE_CASE__ : str = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE__ : str = position_embedding_type
SCREAMING_SNAKE_CASE__ : Dict = use_cache
SCREAMING_SNAKE_CASE__ : List[str] = emb_layer_norm_before
SCREAMING_SNAKE_CASE__ : Union[str, Any] = token_dropout
SCREAMING_SNAKE_CASE__ : Dict = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
SCREAMING_SNAKE_CASE__ : List[Any] = EsmFoldConfig()
elif isinstance(a_ , a_ ):
SCREAMING_SNAKE_CASE__ : List[str] = EsmFoldConfig(**a_ )
SCREAMING_SNAKE_CASE__ : Any = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
SCREAMING_SNAKE_CASE__ : int = get_default_vocab_list()
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_list
else:
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , a_ ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def __lowercase( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , a_ ):
SCREAMING_SNAKE_CASE__ : int = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case :
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 128
lowercase_ = None
def __lowercase( self : List[str] )-> Dict:
"""simple docstring"""
if self.trunk is None:
SCREAMING_SNAKE_CASE__ : str = TrunkConfig()
elif isinstance(self.trunk , a_ ):
SCREAMING_SNAKE_CASE__ : Dict = TrunkConfig(**self.trunk )
def __lowercase( self : List[Any] )-> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = asdict(self )
SCREAMING_SNAKE_CASE__ : List[str] = self.trunk.to_dict()
return output
@dataclass
class snake_case :
lowercase_ = 48
lowercase_ = 1_024
lowercase_ = 128
lowercase_ = 32
lowercase_ = 32
lowercase_ = 32
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 128
lowercase_ = None
def __lowercase( self : Tuple )-> List[Any]:
"""simple docstring"""
if self.structure_module is None:
SCREAMING_SNAKE_CASE__ : Dict = StructureModuleConfig()
elif isinstance(self.structure_module , a_ ):
SCREAMING_SNAKE_CASE__ : Dict = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'
F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'
F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
SCREAMING_SNAKE_CASE__ : int = self.sequence_state_dim // self.sequence_head_width
SCREAMING_SNAKE_CASE__ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
if self.dropout >= 0.4:
raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
def __lowercase( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = asdict(self )
SCREAMING_SNAKE_CASE__ : List[str] = self.structure_module.to_dict()
return output
@dataclass
class snake_case :
lowercase_ = 384
lowercase_ = 128
lowercase_ = 16
lowercase_ = 128
lowercase_ = 12
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 10
lowercase_ = 1e-8
lowercase_ = 1e5
def __lowercase( self : Union[str, Any] )-> str:
"""simple docstring"""
return asdict(self )
def _a ( ):
'''simple docstring'''
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 636 | class snake_case ( UpperCamelCase_ ):
pass
class snake_case ( UpperCamelCase_ ):
pass
class snake_case :
def __init__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = [
[],
[],
[],
]
def __lowercase( self : int , a_ : int , a_ : int )-> None:
"""simple docstring"""
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(a_ )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def __lowercase( self : int )-> int:
"""simple docstring"""
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self : Any )-> str:
"""simple docstring"""
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class snake_case :
def __init__( self : Union[str, Any] )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = []
def __lowercase( self : List[str] , a_ : int )-> None:
"""simple docstring"""
if len(self.queue ) == 100:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(a_ )
def __lowercase( self : int )-> int:
"""simple docstring"""
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = min(self.queue )
self.queue.remove(a_ )
return data
def __str__( self : List[str] )-> str:
"""simple docstring"""
return str(self.queue )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(lowercase__ )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _a ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(lowercase__ )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 636 | 1 |
from collections.abc import Sequence
from queue import Queue
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ):
__A : Any = start
__A : Tuple = end
__A : str = val
__A : Optional[int] = (start + end) // 2
__A : str = left
__A : str = right
def __repr__( self ):
return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
__A : Dict = collection
__A : Union[str, Any] = function
if self.collection:
__A : int = self._build_tree(0 , len(lowercase_ ) - 1 )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase ):
self._update_tree(self.root , lowercase_ , lowercase_ )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase ):
return self._query_range(self.root , lowercase_ , lowercase_ )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase ):
if start == end:
return SegmentTreeNode(lowercase_ , lowercase_ , self.collection[start] )
__A : int = (start + end) // 2
__A : int = self._build_tree(lowercase_ , lowercase_ )
__A : Optional[Any] = self._build_tree(mid + 1 , lowercase_ )
return SegmentTreeNode(lowercase_ , lowercase_ , self.fn(left.val , right.val ) , lowercase_ , lowercase_ )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if node.start == i and node.end == i:
__A : Dict = val
return
if i <= node.mid:
self._update_tree(node.left , lowercase_ , lowercase_ )
else:
self._update_tree(node.right , lowercase_ , lowercase_ )
__A : Dict = self.fn(node.left.val , node.right.val )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , lowercase_ , lowercase_ )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , lowercase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , lowercase_ ) , )
else:
# range in right child tree
return self._query_range(node.right , lowercase_ , lowercase_ )
def __UpperCAmelCase( self ):
if self.root is not None:
__A : Any = Queue()
queue.put(self.root )
while not queue.empty():
__A : List[Any] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
UpperCamelCase = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 520 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
a : str = parser.parse_args()
a : List[Any] = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 640 | 0 |
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the slow (``CodeGenTokenizer``) and fast (``CodeGenTokenizerFast``) tokenizers.

    The obfuscated original assigned all class attributes to one name and gave every
    method the same name, so the mixin never saw its configuration and unittest never
    discovered the tests; names are restored from the later, unmangled call sites.
    """

    # Attributes read by TokenizerTesterMixin.
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        # Write the toy vocab/merges files the tokenizers under test load from.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_pretokenized_inputs(self, *args, **kwargs):
        # Pretokenized inputs do not mix well with the byte-level BPE used here.
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Padding to max_length must fail while no pad token is set.
                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])

    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", "^\"\"\"", "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self, *args, **kwargs):
        # Not applicable to this tokenizer pair.
        pass
| 713 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class FlaxAutoModelTest(unittest.TestCase):
    """Loading / jit / error-message integration tests for ``FlaxAutoModel``."""

    @slow
    def test_bert_from_pretrained(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                self.assertIsInstance(config, BertConfig)

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxBertModel)

    @slow
    def test_roberta_from_pretrained(self):
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name):
                config = AutoConfig.from_pretrained(model_name)
                self.assertIsNotNone(config)
                # NOTE(review): the original presumably checked isinstance against
                # RobertaConfig, which is not imported in this file — confirm and
                # re-add the isinstance check once the import is available.

                model = FlaxAutoModel.from_pretrained(model_name)
                self.assertIsNotNone(model)
                self.assertIsInstance(model, FlaxRobertaModel)

    @slow
    def test_bert_jax_jit(self):
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxBertModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            # Jitting must work with keyword tensor inputs.
            @jax.jit
            def model_jitted(**kwargs):
                return model(**kwargs)

            model_jitted(**tokens).block_until_ready()

    @slow
    def test_roberta_jax_jit(self):
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            model = FlaxRobertaModel.from_pretrained(model_name)
            tokens = tokenizer("Do you support jax jitted function?", return_tensors=TensorType.JAX)

            @jax.jit
            def model_jitted(**kwargs):
                return model(**kwargs)

            model_jitted(**tokens).block_until_ready()

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = FlaxAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack",
        ):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
| 184 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the lxmert sub-package: maps submodule name -> public names.
# The obfuscated source reassigned a single variable four times, destroying the
# dict, and never defined the `_import_structure` that `_LazyModule` consumes.
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]

if TYPE_CHECKING:
    from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
    from .tokenization_lxmert import LxmertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_lxmert_fast import LxmertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lxmert import (
            LxmertEncoder,
            LxmertForPreTraining,
            LxmertForQuestionAnswering,
            LxmertModel,
            LxmertPreTrainedModel,
            LxmertVisualFeatureEncoder,
            LxmertXLayer,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_lxmert import (
            TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLxmertForPreTraining,
            TFLxmertMainLayer,
            TFLxmertModel,
            TFLxmertPreTrainedModel,
            TFLxmertVisualFeatureEncoder,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 694 |
'''simple docstring'''
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    >>> manhattan_distance([1.5, 1.5], [2, 2])
    1.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """Raise TypeError for non-numeric input and ValueError for a missing point."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("""Missing an input""")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Version of :func:`manhattan_distance` with the sum written as one expression.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 694 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL (the obfuscated source bound the logger and
# this map to the same variable, losing the logger).
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    """Configuration for GPTSAN-japanese models.

    The obfuscated source stored every constructor argument into a throwaway local
    instead of ``self``, so the config object came out empty; the attribute names are
    restored from the ``attribute_map`` and the standard PretrainedConfig contract.
    """

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total transformer depth = switch (MoE) layers + extra dense layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 702 |
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset at import time; the __main__ demo below classifies a point
# against these train/test splits.
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between two equal-length points."""
    return np.linalg.norm(np.array(a) - np.array(b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by a k-nearest-neighbours majority vote.

    :param train_data: training feature vectors
    :param train_target: class index for each training vector
    :param classes: class-index -> class-name lookup
    :param point: the feature vector to classify
    :param k: number of neighbours that vote (default 5)
    :return: name of the winning class
    """
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 126 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    """Visual-question-answering tool built on ViLT.

    PipelineTool reads the class attributes below and drives the
    ``encode`` -> ``forward`` -> ``decode`` hooks; the obfuscated source
    collapsed all attributes/methods onto single names, so the tool never worked.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Pillow is required to handle the image input.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image.Image", question: str):
        """Turn the raw (image, question) pair into model-ready tensors."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        """Run the VQA model without tracking gradients; returns answer logits."""
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit to its answer label (`idalabel` was a
        corrupted spelling of the standard `id2label` config attribute)."""
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 42 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output of `step`: the sample for the next timestep and, optionally, the
    model's current estimate of the fully denoised sample."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def _snake_case ( lowercase__ , lowercase__=0.9_9_9 , lowercase__="cosine" , ):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowercase__ ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowercase__ ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_lowerCamelCase : Optional[Any] = []
for i in range(lowercase__ ):
_lowerCamelCase : Optional[Any] = i / num_diffusion_timesteps
_lowerCamelCase : List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
return torch.tensor(lowercase__ , dtype=torch.floataa )
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """Inverse DDIM scheduler: runs the DDIM update in the noising direction.

    Names restored from the unmangled read sites (`self.betas`, `beta_prod_t`,
    `self.final_alpha_cumprod`, ...); the obfuscated source gave every __init__
    parameter the same name and bound locals that later code never read.
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas=None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """DDIM needs no input scaling; return the sample unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None):
        """Choose the (evenly spaced) timesteps used during the inversion run."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ):
        """One inverted-DDIM step: propagate `sample` from `timestep` towards more noise."""
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Repo root (three levels up from this test file); needed both to import the
# `check_copies` utility and to copy the reference model file in setUp.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
UpperCamelCase = self.transformer_dir
shutil.copy(
os.path.join(__magic_name__ , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def lowerCamelCase_ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Tuple=None ):
"""simple docstring"""
UpperCamelCase = comment + F'\nclass {class_name}(nn.Module):\n' + class_code
if overwrite_result is not None:
UpperCamelCase = comment + F'\nclass {class_name}(nn.Module):\n' + overwrite_result
UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 )
UpperCamelCase = black.format_str(__magic_name__ , mode=__magic_name__ )
UpperCamelCase = os.path.join(self.transformer_dir , """new_code.py""" )
with open(__magic_name__ , """w""" , newline="""\n""" ) as f:
f.write(__magic_name__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__magic_name__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__magic_name__ )
with open(__magic_name__ , """r""" ) as f:
self.assertTrue(f.read() , __magic_name__ )
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , __magic_name__ , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , __magic_name__ ) , )
# Copy consistency with a really long name
UpperCamelCase = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}' , F'{long_class_name}LMPredictionHead' , re.sub("""Bert""" , __magic_name__ , __magic_name__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , __magic_name__ , overwrite_result=re.sub("""Bert""" , """TestModel""" , __magic_name__ ) , )
def lowerCamelCase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
self.assertFalse(__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__magic_name__ )
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
UpperCamelCase , UpperCamelCase = check_copies.convert_to_localized_md(
__magic_name__ , __magic_name__ , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(__magic_name__ , __magic_name__ )
| 181 |
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
__snake_case = 3
def primitive_root(p_val: int) -> int:
    """Return a random primitive-root candidate modulo ``p_val``.

    The function was previously named ``_lowercase`` (shared with three other
    defs, each overwriting the last) while being called as ``primitive_root``
    — restored so the call in ``generate_key`` resolves.

    Candidates in ``[3, p_val)`` are rejected when ``g**2 == 1 (mod p)`` or
    ``g**p == 1 (mod p)``, mirroring the original screening logic.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal (public_key, private_key) pair of the given bit size.

    Restores the intended local names: the obfuscated version referenced
    ``key_size`` and ``e_a`` that were never defined, and the function itself
    was named ``_lowercase`` while being called as ``generate_key``.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    """Write ``<name>_pubkey.txt`` / ``<name>_privkey.txt``; abort if either exists.

    The obfuscated signature declared two parameters with the same name
    (a SyntaxError); restored to the intended ``(name, key_size)``.
    """
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    """Generate an ElGamal key pair with the module-default key size (2048 bits)."""
    # Renamed from `_lowercase` so the `main()` call in the __main__ guard resolves.
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
# Script entry point: generate an ElGamal key pair on disk.
if __name__ == "__main__":
    main()
| 181 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Both module constants were assigned to the same name `lowercase_`, so the
# second overwrote the first and `logger` (used inside the config class) was
# undefined. Restored to distinct, conventional names.
logger = logging.get_logger(__name__)

TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    """Configuration for the Table Transformer (DETR-style) detection model.

    Restored from the obfuscated version: the __init__ signature declared every
    parameter under the same name (a SyntaxError) and every ``self.attr = ...``
    had degraded into a throwaway local, so the properties below would have
    raised AttributeError.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias used by the generic config machinery."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias used by the generic config machinery."""
        return self.d_model
class TableTransformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Table Transformer.

    Restored: the class previously shared the name of the config class above
    (shadowing it), and all three properties shared one mangled name, so only
    the last survived.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the two model inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported model.
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 304 |
# Both charts were assigned to the same name `lowercase_` (the second
# overwrote the first) while the converter referenced `speed_chart` /
# `speed_chart_inverse`; restored the intended names.
speed_chart: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 3.6,
    "mph": 1.609344,
    "knot": 1.852,
}

speed_chart_inverse: dict[str, float] = {
    "km/h": 1.0,
    "m/s": 0.277777778,
    "mph": 0.621371192,
    "knot": 0.539956803,
}


def convert_speed(speed: float, unit_from: str, unit_to: str) -> float:
    """Convert ``speed`` from ``unit_from`` to ``unit_to``, rounded to 3 decimals.

    Raises ValueError with a descriptive message for unknown units (the
    obfuscated version raised ``ValueError(speed)`` and declared three
    identically-named parameters — a SyntaxError).

    >>> convert_speed(100, "km/h", "m/s")
    27.778
    >>> convert_speed(100, "m/s", "km/h")
    360.0
    """
    if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"
            f"Valid values are: {', '.join(speed_chart_inverse)}"
        )
        raise ValueError(msg)
    return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 304 | 1 |
# Both constants were assigned to `A_` (second overwrote the first) while the
# validator referenced `LOOKUP_LETTERS`; restored distinct names.
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a check letter.

    The check letter is ``LOOKUP_LETTERS[number % 23]``. Dashes are ignored
    and the input is upper-cased. Raises TypeError for non-strings and
    ValueError for malformed IDs (the obfuscated version raised the *input*
    instead of the prepared message).

    >>> is_spain_national_id("12345678Z")
    True
    >>> is_spain_national_id("12345678T")
    False
    """
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 721 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with a flat string feature.

    Restored: the class name was mangled (and collided with the nested
    builder below), and all three framework hooks shared one mangled method
    name, so only the last definition survived.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Dummy Beam dataset with a nested sequence feature."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Deterministic (key, example) pairs consumed by DummyBeamDataset."""
    # Renamed from the mangled `__UpperCamelCase` so the call sites resolve.
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Deterministic nested (key, example) pairs consumed by NestedBeamDataset."""
    # Renamed from the mangled `__UpperCamelCase` (which also collided with the
    # helper above) so the call sites resolve.
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for Apache-Beam-based dataset builders."""

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # Force two output shards while keeping the real writer.
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # Fixed: the second existence check previously duplicated shard 00000;
            # it must look at shard 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # Preparing without a beam runner must fail explicitly.
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 360 | 0 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    """Task template mapping dataset columns to the image-classification schema.

    Restored: `frozen` previously received a class object instead of True, the
    dataclass fields all shared one mangled name (overwriting each other), and
    the alignment method referenced locals that were never bound.
    """

    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema matches ``features``."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so bypass __setattr__ via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
| 656 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
# Both module constants were assigned to `__A` (the archive map clobbered the
# logger); restored to distinct, conventional names.
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    """Configuration for GPT-Neo models.

    Restored: every ``self.attr = ...`` had degraded into a throwaway local
    (so ``self.attention_layers`` below would raise AttributeError), and the
    static helper was mangled while being called as
    ``expand_attention_types_params``.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],  # noqa: B006 — mutable default kept for interface compatibility
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand [[types, count], ...] into a flat per-layer list of attention types."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX.

    Restored: the obfuscated version built ``[slice(step)] * rank`` instead of
    ``[slice(None)] * rank`` and referenced undefined locals (``rank``), and
    its name collided with the divisor helper below.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    # Start index of every window along `dimension`, then the full index grid.
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    # Move the window axis to the end, matching torch.Tensor.unfold's layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Return (largest divisor of seq_length below window_size, number of blocks).

    Restored: the obfuscated signature declared two parameters with the same
    name (a SyntaxError) and the function name collided with custom_unfold.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo.

    Restored: all members shared one mangled name, and `generate_dummy_inputs`
    unpacked the input shape into throwaway locals (leaving `batch`/`seqlen`
    undefined) and never stored the past key values it built.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Skip OnnxConfigWithPast's override and use the base implementation.
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the synthetic past sequence.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 656 | 1 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in parallel.

    Req = 1 / (1/R1 + 1/R2 + ...). Raises ValueError for any resistor <= 0.
    Fixed from the obfuscated version, which divided by ``float(resistors)``
    (the whole list) and raised ``ValueError(resistors)`` instead of the
    prepared message; also replaced the manual counter with enumerate.
    """
    first_sum = 0.00
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors connected in series.

    Req = R1 + R2 + ... . Raises ValueError for any negative resistor. Fixed
    from the obfuscated version, whose name collided with resistor_parallel
    and which raised ``ValueError(resistors)`` instead of the prepared
    message; also replaced the manual counter with enumerate.
    """
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
    return sum_r
# Run the doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
# All four constant maps were assigned to the same name `lowerCamelCase_`
# (each clobbering the previous) while the tokenizer class references them as
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / PRETRAINED_INIT_CONFIGURATION.
# Restored the intended names; values unchanged.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"
        ),
        "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt",
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "google/electra-small-generator": (
            "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-generator": (
            "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-generator": (
            "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"
        ),
        "google/electra-small-discriminator": (
            "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-base-discriminator": (
            "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"
        ),
        "google/electra-large-discriminator": (
            "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/electra-small-generator": {"do_lower_case": True},
    "google/electra-base-generator": {"do_lower_case": True},
    "google/electra-large-generator": {"do_lower_case": True},
    "google/electra-small-discriminator": {"do_lower_case": True},
    "google/electra-base-discriminator": {"do_lower_case": True},
    "google/electra-large-discriminator": {"do_lower_case": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) ELECTRA tokenizer.

    Restored: all three public methods shared one mangled name (so only the
    last survived), signatures declared duplicate parameter names (a
    SyntaxError), and the second-sequence token ids were collapsed onto the
    first in create_token_type_ids_from_sequences.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its options disagree with ours.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs: [CLS] A [SEP] (+ B [SEP] for sequence pairs)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Return token-type ids: 0s for [CLS] A [SEP], 1s for B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
class _snake_case ( BaseImageProcessor ):
    r"""Image processor that optionally rescales pixel values and symmetric-pads
    images on the bottom/right up to the next multiple of ``pad_size``.

    NOTE(review): the original base class read ``lowerCamelCase__`` (undefined);
    ``BaseImageProcessor`` is the class imported at the top of this module.
    Local/attribute names were mangled to a single symbol, leaving undefined
    references (``old_height``, ``pad_height``, ``do_rescale`` …); restored below.
    """

    # NOTE(review): attribute name restored from the HF image-processor
    # convention (the mangled original was name-mangled into uselessness).
    model_input_names = ["pixel_values"]

    def __init__( self , do_rescale: bool = True , rescale_factor: Union[int, float] = 1 / 255 , do_pad: bool = True , pad_size: int = 8 , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale( self , image: np.ndarray , scale: float , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """Rescale ``image`` by ``scale`` (delegates to the module-level ``rescale``)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def pad( self , image: np.ndarray , size: int , data_format: Optional[Union[str, ChannelDimension]] = None ):
        """Symmetric-pad ``image`` so height and width become multiples of ``size``."""
        old_height, old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )

    def preprocess( self , images: ImageInput , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_pad: Optional[bool] = None , pad_size: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        """Validate, rescale, pad and channel-format a batch of images.

        NOTE(review): method name restored per the ``BaseImageProcessor``
        convention (the mangled original name had no callers in view).
        """
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
import os
def solution ( filename: str = "input.txt" ) -> int:
    """Project Euler 82: minimal path sum across the matrix, moving right, up or down.

    Fixes from the mangled original: the function was renamed so that the
    ``solution()`` call in the ``__main__`` guard resolves, and the loop bounds
    ranged over the *filename* instead of the matrix dimensions.
    """
    # `os.path.join(dirname(filename), filename)` is an identity join; it also
    # accepts absolute paths unchanged.
    with open(os.path.join(os.path.dirname(filename ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split("," )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    # First column: entry cost only.
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        # move right from the previous column
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # relax downward moves
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        # relax upward moves
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
    # Print the answer for the bundled input file (f-string "=" debugging, 3.8+).
    print(F"""{solution() = }""")
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
# Accepted torch-dynamo backend names, in menu display order.  Renamed from the
# mangled `_lowerCAmelCase`: the converter below indexes `DYNAMO_BACKENDS`.
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]
def _snake_case ( __snake_case , __snake_case=None , __snake_case=None , __snake_case=None ):
_UpperCamelCase = True
while ask_again:
_UpperCamelCase = input(__snake_case )
try:
if default is not None and len(__snake_case ) == 0:
return default
return convert_value(__snake_case ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(__snake_case )
def _snake_case ( input_text , options=[] , convert_value=None , default_choice=0 ):
    """Show a bullet menu of `options` and return the (optionally converted) choice.

    The original declared all parameters with the same name (a SyntaxError).
    NOTE: the mutable `[]` default is kept from the original — it is only read.
    """
    menu = BulletMenu(input_text , options )
    result = menu.run(default_choice=default_choice )
    return convert_value(result ) if convert_value is not None else result
def _snake_case ( __snake_case ):
    """Convert a menu index (as str/int) to a ComputeEnvironment member.

    Fix: the original assigned the parsed int to a mangled name and then read
    an undefined `value`.
    """
    value = int(__snake_case )
    return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] )
def _snake_case ( __snake_case ):
    """Convert a menu index to a DistributedType member (undefined `value` fixed)."""
    value = int(__snake_case )
    return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] )
def _snake_case ( __snake_case ):
    """Convert a menu index to the canonical dynamo backend string (undefined `value` fixed)."""
    value = int(__snake_case )
    return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def _snake_case ( __snake_case ):
    """Convert a menu index to a PrecisionType member (undefined `value` fixed)."""
    value = int(__snake_case )
    return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] )
def _snake_case ( __snake_case ):
    """Convert a menu index to a SageMakerDistributedType member (undefined `value` fixed)."""
    value = int(__snake_case )
    return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] )
def _snake_case ( __snake_case ):
return {"yes": True, "no": False}[value.lower()]
class lowerCAmelCase_ ( argparse.RawDescriptionHelpFormatter ):
    """Help formatter that hides the generic ``<command> [<args>]`` placeholder.

    Fixes: the override declared four parameters with the same name (a
    SyntaxError) and was mis-named, so argparse never called it; it is restored
    to the ``_format_usage`` hook its body already invokes via ``super()``.
    """

    def _format_usage( self , usage , actions , groups , prefix ):
        usage = super()._format_usage(usage , actions , groups , prefix )
        usage = usage.replace('<command> [<args>] ' , '' )
        return usage
| 71 | def _snake_case ( __snake_case ):
if not isinstance(__snake_case , __snake_case ):
raise TypeError('''Input value must be an \'int\' type''' )
_UpperCamelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
import re
def split_input ( str_: str ) -> list:
    """Split `str_` on punctuation, then each fragment on whitespace -> list of word lists.

    Fixes: the mangled def name broke the ``split_input`` calls made by the
    sibling helpers, and the body read an undefined ``str_`` parameter.
    """
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def to_simple_case ( str_: str ) -> str:
    """PascalCase conversion: capitalize every word and join without separators.

    Fixes: def name restored (callers below invoke ``to_simple_case``) and the
    local that the join expression reads (``string_split``) is now assigned.
    """
    string_split = split_input(str_ )
    return "".join(
        ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case ( str_: str , upper: bool , separator: str ) -> str:
    """Join words with `separator`, upper- or lower-casing them per `upper`.

    Fixes: the original declared three parameters with the same name (a
    SyntaxError); def name restored (the snake/kebab helpers call
    ``to_complex_case``) and the broken locals re-assigned.
    """
    try:
        string_split = split_input(str_ )
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def __UpperCamelCase ( lowercase__ : str ) -> str:
    """Convert `lowercase__` to PascalCase by delegating to `to_simple_case`.

    NOTE(review): this wrapper's own name looks machine-mangled (presumably
    ``to_pascal_case``); no caller is visible here to confirm.
    """
    return to_simple_case(lowercase__ )
def __UpperCamelCase ( lowercase__: str ) -> str:
    """camelCase conversion: PascalCase with the first character lowered.

    Fix: the original assigned the intermediate to a mangled name and then read
    an undefined ``res_str``.  (The def name itself is presumably a mangled
    ``to_camel_case`` — no caller visible to confirm.)
    """
    try:
        res_str = to_simple_case(lowercase__ )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def __UpperCamelCase ( str_: str , upper: bool ) -> str:
    """snake_case (or UPPER_SNAKE) conversion via `to_complex_case` with '_'.

    Fix: the original declared both parameters with the same name (a SyntaxError).
    """
    return to_complex_case(str_ , upper , "_" )
def __UpperCamelCase ( str_: str , upper: bool ) -> str:
    """kebab-case (or UPPER-KEBAB) conversion via `to_complex_case` with '-'.

    Fix: the original declared both parameters with the same name (a SyntaxError).
    """
    return to_complex_case(str_ , upper , "-" )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    __import__('doctest').testmod()
| 600 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig ( SageMakerConfig ):
    """In-memory SageMaker launch configuration used by the test case below.

    Fixes: the class name is restored (the test references
    ``MockLaunchConfig.success_training_script_args`` /
    ``fail_training_script_args``) and the base class, which read an undefined
    mangled name, is the imported ``SageMakerConfig``.

    NOTE(review): attribute names other than the two ``*_training_script_args``
    lists are reconstructed from their values — confirm against the real
    SageMaker config schema.
    """

    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    # args that `_convert_nargs_to_dict` should parse successfully
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # args with bare flags mixed in, which should be rejected
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class __a ( unittest.TestCase ):
    """Checks that `_convert_nargs_to_dict` types values and rejects bare flags.

    NOTE(review): the method name looks machine-mangled (unittest only
    discovers ``test_*`` methods) — confirm the original name upstream.
    """

    def A ( self ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        # Types restored from the argument values; the mangled original passed
        # a single undefined name to every isinstance call.
        assert isinstance(converted_args["model_name_or_path"] , str )
        assert isinstance(converted_args["do_train"] , bool )
        assert isinstance(converted_args["epochs"] , int )
        assert isinstance(converted_args["learning_rate"] , float )
        assert isinstance(converted_args["max_steps"] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
"""simple docstring"""
from __future__ import annotations
# Sentinel key that marks the end of a stored word inside the trie.
SCREAMING_SNAKE_CASE : str = '#'
class snake_case_ :
    """A character trie supporting word insertion and prefix autocomplete.

    Fixes from the mangled original: ``self._trie`` was never assigned in
    ``__init__``, all three methods shared one name (shadowing each other), and
    the end-of-word sentinel was undefined.  Method names are restored from the
    call sites in this module (``trie.insert_word`` / ``trie.find_word`` /
    ``self._elements``).
    """

    # end-of-word sentinel (mirrors the module-level '#' marker)
    _END = "#"

    def __init__( self ):
        self._trie = {}

    def insert_word( self , text ):
        """Insert ``text`` into the trie, marking its end with the sentinel."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[self._END] = True

    def find_word( self , prefix ):
        """Return a tuple of completions of ``prefix`` (or [] if absent)."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )

    def _elements( self , d ):
        """Recursively collect suffixes below node ``d``; a word end yields ' '."""
        result = []
        for c, v in d.items():
            sub_result = [' '] if c == self._END else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
# Module-level demo trie pre-populated with a few words (used by the
# autocomplete helper below).  Fixes: the originals assigned `Trie()` (an
# undefined name — the class above is `snake_case_`) and the word tuple to the
# same mangled name, clobbering the end-of-word marker and leaving `trie` /
# `words` undefined for the loop below.
trie = snake_case_()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)
def autocomplete_using_trie ( string ):
    """Return a tuple of full words that complete `string` using the module trie.

    Fixes: def name restored (``main`` below calls ``autocomplete_using_trie``)
    and the locals the return expression reads (``suffixes``, ``string``) are
    now actually bound.
    """
    suffixes = trie.find_word(string )
    return tuple(string + word for word in suffixes )
def main ( ):
    """Demo entry point: print completions for the prefix 'de'.

    Fix: def name restored — the ``__main__`` guard below calls ``main()``.
    """
    print(autocomplete_using_trie('de' ) )
if __name__ == "__main__":
    # Run doctests, then the interactive demo.
    import doctest
    doctest.testmod()
    main()
"""simple docstring"""
def xnor_gate ( input_1 , input_2 ):
    """XNOR: return 1 when both inputs are equal, else 0.

    Fixes: the original declared both parameters with the same name (a
    SyntaxError) and therefore compared a value with itself; the def name is
    restored from the ``xnor_gate`` calls in the rest of this module.
    """
    return 1 if input_1 == input_2 else 0
def __lowerCamelCase ( ):
    """Exercise all four XNOR truth-table rows.

    NOTE(review): this def's own name looks machine-mangled (presumably
    ``test_xnor_gate``); no caller visible here to confirm.
    """
    assert xnor_gate(0 ,0 ) == 1
    assert xnor_gate(0 ,1 ) == 0
    assert xnor_gate(1 ,0 ) == 0
    assert xnor_gate(1 ,1 ) == 1
if __name__ == "__main__":
    # Print the full XNOR truth table.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch ( gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path ) -> None:
    """Convert a TF GPT-2 checkpoint to a PyTorch state dict + config JSON.

    Fixes: the original declared all three parameters with the same name (a
    SyntaxError) and mangled every local; the def name is restored from the
    call in the ``__main__`` guard below.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(model.state_dict(), pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path, "w", encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # Fixes: the original bound the parser to a mangled name and then read the
    # undefined `parser` / `args`, and accessed attributes ("gpta_*") that do
    # not match the declared "--gpt2_*" flags.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
    """Config checks specific to Segformer.

    Fixes: the class name is restored (the test case below instantiates
    ``SegformerConfigTester``) and the base class, an undefined mangled name,
    is the imported ``ConfigTester``.  NOTE(review): the method name is
    restored per the ConfigTester hook convention — confirm upstream.
    """

    def create_and_test_config_common_properties( self ) -> None:
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , "hidden_sizes" ) )
        self.parent.assertTrue(hasattr(config , "num_attention_heads" ) )
        self.parent.assertTrue(hasattr(config , "num_encoder_blocks" ) )
class SegformerModelTester :
    """Builds tiny Segformer configs/inputs for the common model tests.

    Fixes: the class name is restored (the test case below instantiates
    ``SegformerModelTester``), every local that was collapsed to one mangled
    symbol is re-bound, and the method names are restored from the call sites
    in the test case (``prepare_config_and_inputs``, ``create_and_check_*``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self ,
        parent ,
        batch_size=13 ,
        image_size=64 ,
        num_channels=3 ,
        num_encoder_blocks=4 ,
        depths=[2, 2, 2, 2] ,
        sr_ratios=[8, 4, 2, 1] ,
        hidden_sizes=[16, 32, 64, 128] ,
        downsampling_rates=[1, 4, 8, 16] ,
        num_attention_heads=[1, 2, 4, 8] ,
        is_training=True ,
        use_labels=True ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        initializer_range=0.02 ,
        num_labels=3 ,
        scope=None ,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self ):
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model( self , config , pixel_values , labels ):
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def create_and_check_for_image_segmentation( self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def create_and_check_for_binary_image_segmentation( self , config , pixel_values , labels ):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( _snake_case , _snake_case , unittest.TestCase ):
    # NOTE(review): this class is machine-mangled — the two base classes share
    # one undefined name (presumably ModelTesterMixin / PipelineTesterMixin),
    # the class attributes below all collapse to `lowercase`, the test methods
    # are all named `snake_case_` (so unittest cannot discover them), and most
    # locals are bound to `A_` but read under their original names
    # (e.g. `self.model_tester`). Restore from the upstream Segformer test file.
    lowercase = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    lowercase = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    lowercase = True
    lowercase = False
    lowercase = False
    lowercase = False
    def snake_case_ ( self ) -> Optional[Any]:
        """Set up the model/config testers (assignments are mangled — see class note)."""
        A_ = SegformerModelTester(self )
        A_ = SegformerConfigTester(self , config_class=UpperCamelCase__ )
    def snake_case_ ( self ) -> Dict:
        """Run the shared config checks."""
        self.config_tester.run_common_tests()
    def snake_case_ ( self ) -> Optional[Any]:
        """Smoke-test the bare SegformerModel forward pass."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def snake_case_ ( self ) -> str:
        """Check binary (num_labels=1) semantic segmentation produces a loss."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )
    def snake_case_ ( self ) -> Union[str, Any]:
        """Check multi-class semantic segmentation shapes and loss."""
        A_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )
    @unittest.skip("""SegFormer does not use inputs_embeds""" )
    def snake_case_ ( self ) -> List[Any]:
        """Skipped: model has no inputs_embeds path."""
        pass
    @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
    def snake_case_ ( self ) -> Dict:
        """Skipped: no input/output embedding accessors."""
        pass
    def snake_case_ ( self ) -> int:
        """Check the forward signature starts with `pixel_values`."""
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ = model_class(UpperCamelCase__ )
            A_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A_ = [*signature.parameters.keys()]
            A_ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def snake_case_ ( self ) -> Union[str, Any]:
        """Check attention outputs: count and first/last block shapes."""
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        A_ = True
        for model_class in self.all_model_classes:
            A_ = True
            A_ = False
            A_ = True
            A_ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A_ = outputs.attentions
            A_ = sum(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A_ = True
            A_ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A_ = outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A_ = (self.model_tester.image_size // 4) ** 2
            A_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            A_ = (self.model_tester.image_size // 32) ** 2
            A_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            A_ = len(UpperCamelCase__ )
            # Check attention is always last and order is fine
            A_ = True
            A_ = True
            A_ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
            A_ = outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A_ = (self.model_tester.image_size // 4) ** 2
            A_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def snake_case_ ( self ) -> Dict:
        """Check hidden-state outputs: one per encoder block, first-block shape."""
        def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
            A_ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A_ = outputs.hidden_states
            A_ = self.model_tester.num_encoder_blocks
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_ = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A_ = True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def snake_case_ ( self ) -> Optional[int]:
        """Check a training step backpropagates for non-mapped classes."""
        if not self.model_tester.is_training:
            return
        A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
        A_ = True
        for model_class in self.all_model_classes:
            if model_class in get_values(UpperCamelCase__ ):
                continue
            A_ = model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.train()
            A_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            A_ = model(**UpperCamelCase__ ).loss
            loss.backward()
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def snake_case_ ( self ) -> List[str]:
        """Skipped upstream pending smaller common-test models."""
        pass
    @slow
    def snake_case_ ( self ) -> List[Any]:
        """Slow: load the first pretrained checkpoint and check it exists."""
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A_ = SegformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def prepare_img() -> "Image.Image":
    """Load the COCO test-fixture image used by the integration tests below.

    Fixes: def name restored (the integration tests call ``prepare_img()``)
    and the undefined local in the return statement is now bound.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class A__ ( unittest.TestCase ):
    # NOTE(review): machine-mangled integration tests — method names are all
    # `snake_case_` (unittest cannot discover them), locals are bound to `A_`
    # but read under their original names (`image_processor`, `model`,
    # `outputs`, ...), and several keyword arguments pass one undefined name.
    # Restore from the upstream Segformer integration test file.
    @slow
    def snake_case_ ( self ) -> Any:
        """Slow: b0 ADE checkpoint — check logits shape and a value slice."""
        # only resize + normalize
        A_ = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A_ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            UpperCamelCase__ )
        A_ = prepare_img()
        A_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
        A_ = encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A_ = model(UpperCamelCase__ )
        A_ = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A_ = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
    @slow
    def snake_case_ ( self ) -> Optional[Any]:
        """Slow: b1 Cityscapes checkpoint — check logits shape and a value slice."""
        # only resize + normalize
        A_ = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A_ = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(UpperCamelCase__ )
        A_ = prepare_img()
        A_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
        A_ = encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A_ = model(UpperCamelCase__ )
        A_ = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A_ = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-1 ) )
    @slow
    def snake_case_ ( self ) -> Dict:
        """Slow: post-process semantic segmentation with and without target sizes."""
        # only resize + normalize
        A_ = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A_ = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            UpperCamelCase__ )
        A_ = prepare_img()
        A_ = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" )
        A_ = encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A_ = model(UpperCamelCase__ )
        A_ = outputs.logits.detach().cpu()
        A_ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
        A_ = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        A_ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        A_ = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class snake_case_ (TaskTemplate):
    """Task template pairing an ``audio`` input column with ``ClassLabel`` labels.

    Fixes from the mangled original: the decorator argument and base class were
    undefined names (now ``frozen=True`` / the imported ``TaskTemplate``); the
    class-check passed the *features* argument as the isinstance type instead
    of the imported ``ClassLabel``; and every field shared one name, leaving
    ``self.label_column`` / ``self.label_schema`` / ``self.audio_column``
    undefined (field names restored from those uses).

    NOTE(review): the ``task`` / ``input_schema`` field names and the two
    member names below are reconstructed from the TaskTemplate convention —
    confirm upstream.
    """

    task: str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"audio": Audio()} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features( self ,features ):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel."""
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] ,ClassLabel ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: write through __dict__ to bypass immutability
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class snake_case_ (lowercase__ ):
    """Output container for an SDE-VE scheduler step (a sample and its mean).

    NOTE(review): both field lines below were mangled to ``_lowerCamelCase = 42``
    (an annotation collapsed to a literal), so at runtime only one class
    attribute survives; originally two distinct annotated tensor fields
    (presumably ``prev_sample`` / ``prev_sample_mean``) — confirm upstream.
    The base class name is also mangled (presumably ``BaseOutput``).
    """
    _lowerCamelCase = 42
    _lowerCamelCase = 42
class snake_case_ (lowercase__ , lowercase__ ):
    """Variance-exploding (SDE-VE) scheduler: keeps a table of discretized
    sigmas and continuous timesteps; see the methods below.

    NOTE(review): the base-class list was mangled to the same undefined name
    twice (duplicate bases raise TypeError; originally presumably
    SchedulerMixin and ConfigMixin) — confirm upstream.
    """
    # NOTE(review): presumably the scheduler `order` attribute — confirm name.
    _lowerCamelCase = 1
@register_to_config
def __init__( self ,lowercase = 2000 ,lowercase = 0.15 ,lowercase = 0.01 ,lowercase = 1348.0 ,lowercase = 1E-5 ,lowercase = 1 ,):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = sigma_max
# setable values
UpperCAmelCase_ : Optional[int] = None
self.set_sigmas(lowercase ,lowercase ,lowercase ,lowercase)
def A_ ( self ,lowercase ,lowercase = None):
"""simple docstring"""
return sample
def A_ ( self ,lowercase ,lowercase = None ,lowercase = None):
"""simple docstring"""
UpperCAmelCase_ : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
UpperCAmelCase_ : List[Any] = torch.linspace(1 ,lowercase ,lowercase ,device=lowercase)
def A_ ( self ,lowercase ,lowercase = None ,lowercase = None ,lowercase = None):
"""simple docstring"""
UpperCAmelCase_ : Any = sigma_min if sigma_min is not None else self.config.sigma_min
UpperCAmelCase_ : int = sigma_max if sigma_max is not None else self.config.sigma_max
UpperCAmelCase_ : Union[str, Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(lowercase ,lowercase)
UpperCAmelCase_ : Union[str, Any] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
UpperCAmelCase_ : Optional[int] = torch.exp(torch.linspace(math.log(lowercase) ,math.log(lowercase) ,lowercase))
UpperCAmelCase_ : Any = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def A_ ( self ,lowercase ,lowercase):
"""simple docstring"""
return torch.where(
timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,)
def A_ ( self ,lowercase ,lowercase ,lowercase ,lowercase = None ,lowercase = True ,):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
UpperCAmelCase_ : Optional[int] = timestep * torch.ones(
sample.shape[0] ,device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
UpperCAmelCase_ : Tuple = (timestep * (len(self.timesteps) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
UpperCAmelCase_ : Optional[int] = timesteps.to(self.discrete_sigmas.device)
UpperCAmelCase_ : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device)
UpperCAmelCase_ : Optional[Any] = self.get_adjacent_sigma(lowercase ,lowercase).to(sample.device)
UpperCAmelCase_ : Any = torch.zeros_like(lowercase)
UpperCAmelCase_ : Dict = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
UpperCAmelCase_ : Dict = diffusion.flatten()
while len(diffusion.shape) < len(sample.shape):
UpperCAmelCase_ : List[str] = diffusion.unsqueeze(-1)
UpperCAmelCase_ : List[Any] = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
UpperCAmelCase_ : Union[str, Any] = randn_tensor(
sample.shape ,layout=sample.layout ,generator=lowercase ,device=sample.device ,dtype=sample.dtype)
UpperCAmelCase_ : Any = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
UpperCAmelCase_ : Tuple = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=lowercase ,prev_sample_mean=lowercase)
def A_ ( self ,lowercase ,lowercase ,lowercase = None ,lowercase = True ,):
"""simple docstring"""
if self.timesteps is None:
raise ValueError(
"`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
UpperCAmelCase_ : int = randn_tensor(sample.shape ,layout=sample.layout ,generator=lowercase).to(sample.device)
# compute step size from the model_output, the noise, and the snr
UpperCAmelCase_ : Union[str, Any] = torch.norm(model_output.reshape(model_output.shape[0] ,-1) ,dim=-1).mean()
UpperCAmelCase_ : Optional[Any] = torch.norm(noise.reshape(noise.shape[0] ,-1) ,dim=-1).mean()
UpperCAmelCase_ : List[Any] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
UpperCAmelCase_ : Optional[Any] = step_size * torch.ones(sample.shape[0]).to(sample.device)
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
UpperCAmelCase_ : Any = step_size.flatten()
while len(step_size.shape) < len(sample.shape):
UpperCAmelCase_ : Tuple = step_size.unsqueeze(-1)
UpperCAmelCase_ : Dict = sample + step_size * model_output
UpperCAmelCase_ : int = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowercase)
def A_ ( self ,lowercase ,lowercase ,lowercase ,):
"""simple docstring"""
UpperCAmelCase_ : Any = timesteps.to(original_samples.device)
UpperCAmelCase_ : List[str] = self.discrete_sigmas.to(original_samples.device)[timesteps]
UpperCAmelCase_ : Tuple = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(lowercase) * sigmas[:, None, None, None]
)
UpperCAmelCase_ : Tuple = noise + original_samples
return noisy_samples
def __len__( self):
"""simple docstring"""
return self.config.num_train_timesteps
| 455 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A weighted directed edge of a 0-1 graph.

    NOTE(review): the mangled version declared the same field name twice (so the
    dataclass had one field, while callers build `Edge(to_vertex, weight)` and
    read `.destination_vertex` / `.weight`), and the class name did not match
    the `Edge` references below — restored.
    """

    destination_vertex: int  # index of the vertex this edge points to
    weight: int  # must be 0 or 1 (enforced by AdjacencyList.add_edge)
class AdjacencyList:
    """Graph over vertices 0..size-1 with 0/1 edge weights, supporting 0-1 BFS.

    NOTE(review): restored from mangled source where the property and both
    methods all shared one name and `self.size` did not exist.
    """

    def __init__(self, size: int):
        # one adjacency list per vertex; elements are Edge instances
        self._graph = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the outgoing edges of `vertex`."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        """Number of vertices in the graph."""
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge; weight must be 0 or 1 and target must be in range."""
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS shortest path: deque front for 0-weight edges, back for 1-weight.

        Raises ValueError when `finish_vertex` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # skip if we already have an equal-or-better distance recorded
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")

        return distances[finish_vertex]
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 26 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
# Official dance-diffusion checkpoints: download URL plus the audio sample rate
# and window size each model was trained with.
# NOTE(review): bound to MODELS_MAP, the name the rest of the script reads; the
# mangled binding made every reference to MODELS_MAP a NameError.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}
def alpha_sigma_to_t(alpha, sigma):
    """Map an (alpha, sigma) pair to crash-schedule time t = atan2(sigma, alpha) / (pi/2).

    NOTE(review): fixed two defects — duplicate parameter names (a SyntaxError)
    and the nonexistent `torch.atana` (should be `torch.atan2`).
    """
    return torch.atan2(sigma, alpha) / math.pi * 2
def get_crash_schedule(t):
    """Convert linear times `t` in [0, 1] to the dance-diffusion "crash" schedule."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)
class Object(object):
    """Bare attribute bag used as a stand-in config object in `main`."""

    pass
class DiffusionUncond(nn.Module):
    """Wrapper mirroring the original audio-diffusion training module layout,
    so the released checkpoint's state_dict keys line up on load."""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnetaD(global_args, n_attn_layers=4)
        # EMA copy is what gets exported/evaluated
        self.diffusion_ema = deepcopy(self.diffusion)
        # scramble=True restored from upstream — the mangled source reused a placeholder here
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)
def download(model_name):
    """Fetch an official checkpoint into the current directory and return its path.

    NOTE(review): `os.system` + f-string builds a shell command from `model_name`;
    the URLs come from the trusted MODELS_MAP, but consider `subprocess.run([...])`.
    """
    url = MODELS_MAP[model_name]["""url"""]
    os.system(F"wget {url} ./")
    return F"./{model_name}.ckpt"
# Layer-number -> diffusers sub-layer name maps used by `rename`.
# NOTE(review): the mangled source bound every dict to the same name, each
# overwriting the previous; names restored to match their uses in `rename`
# (keys < 7 -> down blocks, keys > 7 -> up blocks, deepest level -> mid block).
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}
# Sub-module renames inside residual conv blocks and attention blocks.
RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}
ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}
def convert_resconv_naming(name):
    """Rename a residual-conv sub-module path via RES_CONV_MAP.

    Raises ValueError for names not of the form `skip` / `main.{digit}`.
    """
    if name.startswith("""skip"""):
        return name.replace("""skip""", RES_CONV_MAP["""skip"""])

    # name has to be of format main.{digit}
    if not name.startswith("""main."""):
        raise ValueError(F"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])
def convert_attn_naming(name):
    """Rename an attention sub-module path via ATTN_MAP.

    Returns a string for 1:1 renames and a list of strings when one original
    key fans out to several diffusers keys (e.g. qkv_proj -> query/key/value).
    """
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(F"Attn error with {name}")
def rename(input_string, max_depth=13):
    """Translate one original dance-diffusion state_dict key to diffusers naming.

    NOTE(review): restored from mangled source whose duplicate parameter names
    made it a SyntaxError and whose lost assignments broke the data flow.
    Returns a string, or a list of strings when the key fans out (attention qkv).
    """
    string = input_string
    if string.split(""".""")[0] == "timestep_embed":
        return string.replace("""timestep_embed""", """time_proj""")

    # count how many "net.3." / "main.7." wrappers we are inside -> UNet depth
    depth = 0
    if string.startswith("""net.3."""):
        depth += 1
        string = string[6:]
    elif string.startswith("""net."""):
        string = string[4:]

    while string.startswith("""main.7."""):
        depth += 1
        string = string[7:]

    if string.startswith("""main."""):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = """mid_block"""
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = F"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = F"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = F"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else """down_blocks.0"""

    if not string_left.startswith("""."""):
        raise ValueError(F"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + """.""" + new_layer + """.""" + string_left
    else:
        new_string = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    """Rename all keys of an original checkpoint's state_dict to diffusers naming.

    Keys ending in "kernel" belong to up/down-sample layers without trainable
    weights and are dropped.
    """
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("""kernel"""):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns(new_state_dict, new_k, v):
    """Store attention weights under their diffusers keys, converting Conv1d
    parameters to Linear shape and splitting fused qkv matrices in three.

    NOTE(review): duplicate parameter names in the mangled source made this a
    SyntaxError; parameters restored from the call site in rename_orig_weights.
    """
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight: drop the trailing conv kernel dimension
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: first dim holds query/key/value stacked
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    """Convert a dance-diffusion checkpoint to diffusers format and sanity-check
    that both pipelines generate (near-)identical audio.

    NOTE(review): restored from mangled source — every local was bound to the
    same placeholder, so the conversion data flow had to be reconstructed.
    """
    device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
    model_name = args.model_path.split("""/""")[-1].split(""".""")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), F"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["""sample_rate"""]
    sample_size = MODELS_MAP[model_name]["""sample_size"""]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNetaDModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["""state_dict"""])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    # every renamed key must exist in the diffusers model; leftover diffusers
    # keys may only be the weight-free "kernel" resampling buffers
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, F"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("""kernel""") for k in list(diffusers_minus_renamed)), F"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), F"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("""Diff sum""", diff_sum)
    print("""Diff max""", diff_max)

    assert diff_max < 1E-3, F"Diff max: {diff_max} is too much :-/"
    print(F"Conversion for {model_name} successful!")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    # NOTE(review): the mangled source bound the parser to a throwaway name and
    # then called `parser.add_argument` / `main(args)` on undefined names.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    main(args)
| 301 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Pairwise cosine similarity between two embedding batches.

    NOTE(review): restored — the mangled signature had duplicate parameter
    names (a SyntaxError), and the class below calls `cosine_distance`.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    """CLIP-based NSFW checker for Stable Diffusion outputs.

    NOTE(review): restored from mangled source where both forward variants
    shared one name (shadowing) and had duplicate parameter names.
    """

    config_class = CLIPConfig

    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # fixed concept embeddings loaded from the checkpoint; never trained
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """Return (images, has_nsfw_concepts) — a bool per image in the batch."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'''special_scores''': {}, '''special_care''': [], '''concept_scores''': {}, '''bad_concepts''': []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    # NOTE: appends a *set* literal — preserved as-is from upstream
                    result_img["special_care"].append({concept_idx, result_img['''special_scores'''][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res['''bad_concepts''']) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """Vectorized, branch-free variant of `forward` suitable for ONNX export."""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 312 |
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = 8.9_88e9 # units = N * m^s * C^-2
def a__(force, chargea, chargeb, distance) -> dict[str, float]:
    """Solve Coulomb's law for the single argument given as 0.

    Exactly one of (force, chargea, chargeb, distance) must be 0; the returned
    dict maps that quantity's name to its computed value.

    NOTE(review): the mangled signature declared the same parameter name four
    times (a SyntaxError); distinct parameters restored.

    Raises:
        ValueError: if not exactly one argument is 0, or distance is negative.
    """
    charge_product = abs(chargea * chargeb)

    if (force, chargea, chargeb, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        chargea = abs(force) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        chargeb = abs(force) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 312 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy import structure for the ViT-MAE model package.
# NOTE(review): the mangled source bound the structure dict *and* both model
# lists to the same name `_A` (each overwriting the previous) and then passed
# the undefined `_import_structure` to _LazyModule — restored.
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    # real imports only for type checkers
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
    import sys

    # at runtime, replace this module with a lazy proxy that imports on access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 431 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment, falling back to `default`.

    NOTE(review): the mangled signature had two parameters both named `_A`
    (a SyntaxError); restored to (key, default) per the call sites below.

    Raises:
        ValueError: if the variable is set but not a yes/no-style value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
# Run-mode flags controlled by environment variables, plus skipif markers for
# optional dependencies.
# NOTE(review): the mangled source bound every value to `_A`, each overwriting
# the previous; names restored from their uses (e.g. `_run_slow_tests` below).
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)
def require_faiss(test_case):
    """Skip `test_case` unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires faiss""")(test_case)
    return test_case


def require_regex(test_case):
    """Skip `test_case` unless the `regex` package is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires regex""")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip `test_case` unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires elasticsearch""")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip `test_case` unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires sqlalchemy""")(test_case)
    return test_case


def require_torch(test_case):
    """Skip `test_case` unless PyTorch is available per `datasets.config`."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""")(test_case)
    return test_case


def require_tf(test_case):
    """Skip `test_case` unless TensorFlow is available per `datasets.config`."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""")(test_case)
    return test_case


def require_jax(test_case):
    """Skip `test_case` unless JAX is available per `datasets.config`."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""")(test_case)
    return test_case


def require_pil(test_case):
    """Skip `test_case` unless Pillow is available per `datasets.config`."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""")(test_case)
    return test_case
def require_transformers(test_case):
    """Skip `test_case` unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("""test requires transformers""")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip `test_case` unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("""test requires tiktoken""")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip `test_case` unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("""test requires spacy""")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip the test unless spacy *and* `model` are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("""test requires spacy""")(test_case)
        except OSError:
            return unittest.skip("""test requires spacy model \'{}\'""".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip `test_case` unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires pyspark""")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip `test_case` unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires joblibspark""")(test_case)
    else:
        return test_case
def slow(test_case):
    """Skip `test_case` unless RUN_SLOW is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""")(test_case)
    return test_case


def local(test_case):
    """Skip `test_case` unless RUN_LOCAL is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""")(test_case)
    return test_case


def packaged(test_case):
    """Skip `test_case` unless RUN_PACKAGED is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""")(test_case)
    return test_case


def remote(test_case):
    """Skip `test_case` unless RUN_REMOTE is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator: apply every given decorator to each `test*` method."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("""test"""):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    """Raised by offline simulation when a request has no timeout set.

    NOTE(review): name and base restored — the mangled `class A(__UpperCAmelCase)`
    had an undefined base, and `offline()` below raises this exception by name.
    """

    pass
class OfflineSimulationMode(Enum):
    """How `offline()` simulates loss of connectivity."""

    CONNECTION_FAILS = 0  # every request raises ConnectionError immediately
    CONNECTION_TIMES_OUT = 1  # requests hang until their timeout fires
    HF_DATASETS_OFFLINE_SET_TO_1 = 2  # flip the library's offline config flag
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Context manager simulating an offline environment for requests-based code.

    NOTE(review): restored — the nested `timeout_request` had duplicate
    parameter names (a SyntaxError) and all attribute assignments were lost.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        url = '''https://10.255.255.1'''
        if kwargs.get('''timeout''') is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout.")
        kwargs["timeout"] = timeout
        try:
            return online_request(method, url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace('''10.255.255.1''', f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError('''Offline mode is enabled.''', request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch('''requests.Session.send''', raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch('''requests.Session.request''', timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch('''datasets.config.HF_DATASETS_OFFLINE''', True):
            yield
    else:
        raise ValueError('''Please use a value from the OfflineSimulationMode enum.''')
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the body of the `with` inside a fresh temporary directory, restoring
    the previous working directory afterwards (even on error)."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    """Assert that pyarrow's allocated memory grows across the `with` body."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that pyarrow's allocated memory does not grow across the `with` body."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """True if two NumPy Generators are in the same state (compared via deep
    copies so neither argument's state is consumed).

    NOTE(review): the mangled signature used the same name for both parameters,
    which is a SyntaxError.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    """Wrap `func` so HTTP 500/502 errors mark the test xfail instead of failing."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith('''500''') or str(err).startswith('''502'''):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = returncode
lowerCAmelCase_ = stdout
lowerCAmelCase_ = stderr
async def __UpperCamelCase ( _A , _A ):
while True:
lowerCAmelCase_ = await stream.readline()
if line:
callback(_A )
else:
break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    """Spawn `cmd`, stream stdout/stderr live, and return a _RunOutput.

    NOTE(review): identifiers restored; also wrapped the stream readers in
    `asyncio.create_task` — passing raw coroutines to `asyncio.wait` is an
    error on Python >= 3.11.
    """
    if echo:
        print('''\nRunning: ''', ''' '''.join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env)

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode('''utf-8''').rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label='''stdout:'''))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label='''stderr:'''))),
        ], timeout=timeout)
    return _RunOutput(await p.wait(), out, err)
def __UpperCamelCase ( _A , _A=None , _A=None , _A=180 , _A=False , _A=True ):
lowerCAmelCase_ = asyncio.get_event_loop()
lowerCAmelCase_ = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
lowerCAmelCase_ = ''' '''.join(_A )
if result.returncode > 0:
lowerCAmelCase_ = '''\n'''.join(result.stderr )
raise RuntimeError(
f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
f"The combined stderr from workers follows:\n{stderr}" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(f"'{cmd_str}' produced no output." )
return result
def __UpperCamelCase ( ):
lowerCAmelCase_ = os.environ.get('''PYTEST_XDIST_WORKER''' , '''gw0''' )
lowerCAmelCase_ = re.sub(r'''^gw''' , '''''' , _A , 0 , re.M )
return int(_A )
def __UpperCamelCase ( ):
lowerCAmelCase_ = 29500
lowerCAmelCase_ = pytest_xdist_worker_id()
return port + uniq_delta
| 431 | 1 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _lowerCAmelCase ( __UpperCAmelCase ):
    """Unit-test suite for ``UniPCMultistepScheduler``.

    NOTE(review): an automated renaming pass damaged this class.  Several
    methods declare ``lowercase`` both as a keyword argument and as
    ``**lowercase`` (a SyntaxError in Python 3), and many bodies read names
    that are never bound (``config``, ``kwargs``, ``scheduler``,
    ``scheduler_class``, ``time_step``, ``full_loop``, bare ``lowercase`` in
    methods that take no such argument) because every assignment target was
    rewritten to the throwaway local ``A_``.  The code is left byte-identical;
    the comments below only flag the breakage.
    """

    # NOTE(review): two distinct class attributes (presumably
    # ``scheduler_classes`` and ``forward_default_kwargs``) were both renamed
    # to the same identifier, so the second assignment clobbers the first.
    __SCREAMING_SNAKE_CASE : Dict = (UniPCMultistepScheduler,)
    __SCREAMING_SNAKE_CASE : Dict = (('num_inference_steps', 25),)

    def _a (self , **lowercase ):
        # Build the default scheduler config; **lowercase holds overrides.
        A_ : List[Any] = {
            """num_train_timesteps""": 1000,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """solver_order""": 2,
            """solver_type""": """bh2""",
        }
        # NOTE(review): ``config`` is unbound (the dict above went to ``A_``),
        # so this raises NameError at runtime.
        config.update(**lowercase )
        return config

    def _a (self , lowercase=0 , **lowercase ):
        # NOTE(review): ``lowercase`` is declared twice in this signature —
        # SyntaxError.  Intent: save/reload the scheduler config and check
        # that stepping the reloaded scheduler reproduces the original.
        A_ : List[Any] = dict(self.forward_default_kwargs )
        A_ : Optional[Any] = kwargs.pop("""num_inference_steps""" , lowercase )
        A_ : Dict = self.dummy_sample
        A_ : Tuple = 0.1 * sample
        A_ : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            A_ : List[Any] = self.get_scheduler_config(**lowercase )
            A_ : List[Any] = scheduler_class(**lowercase )
            scheduler.set_timesteps(lowercase )
            # copy over dummy past residuals
            A_ : int = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase )
                A_ : str = scheduler_class.from_pretrained(lowercase )
                new_scheduler.set_timesteps(lowercase )
                # copy over dummy past residuals
                A_ : str = dummy_past_residuals[: new_scheduler.config.solver_order]
                # NOTE(review): assigns a 2-tuple to one dead local; the
                # original presumably unpacked into two names.
                A_ : List[str] = sample, sample
                for t in range(lowercase , time_step + scheduler.config.solver_order + 1 ):
                    A_ : str = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
                    A_ : Tuple = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _a (self , lowercase=0 , **lowercase ):
        # NOTE(review): duplicate ``lowercase`` parameter — SyntaxError.
        # Intent: same save/reload round-trip as above, with the residuals
        # copied after set_timesteps.
        A_ : Optional[Any] = dict(self.forward_default_kwargs )
        A_ : Optional[int] = kwargs.pop("""num_inference_steps""" , lowercase )
        A_ : List[Any] = self.dummy_sample
        A_ : Optional[int] = 0.1 * sample
        A_ : List[str] = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            A_ : Union[str, Any] = self.get_scheduler_config()
            A_ : Dict = scheduler_class(**lowercase )
            scheduler.set_timesteps(lowercase )
            # copy over dummy past residuals (must be after setting timesteps)
            A_ : Any = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(lowercase )
                A_ : Optional[int] = scheduler_class.from_pretrained(lowercase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(lowercase )
                # copy over dummy past residual (must be after setting timesteps)
                A_ : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
                A_ : Optional[Any] = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
                A_ : Optional[Any] = new_scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"

    def _a (self , lowercase=None , **lowercase ):
        # NOTE(review): duplicate ``lowercase`` parameter — SyntaxError.
        # Intent: run a full 10-step denoising loop and return the sample.
        if scheduler is None:
            A_ : List[str] = self.scheduler_classes[0]
            A_ : int = self.get_scheduler_config(**lowercase )
            A_ : str = scheduler_class(**lowercase )
        A_ : Optional[Any] = self.scheduler_classes[0]
        A_ : int = self.get_scheduler_config(**lowercase )
        A_ : Optional[Any] = scheduler_class(**lowercase )
        A_ : List[str] = 10
        A_ : List[Any] = self.dummy_model()
        A_ : List[Any] = self.dummy_sample_deter
        scheduler.set_timesteps(lowercase )
        for i, t in enumerate(scheduler.timesteps ):
            A_ : List[str] = model(lowercase , lowercase )
            A_ : str = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
        return sample

    def _a (self ):
        # Intent: stepping from two adjacent timesteps must give outputs of
        # the sample's shape.
        A_ : Optional[Any] = dict(self.forward_default_kwargs )
        A_ : str = kwargs.pop("""num_inference_steps""" , lowercase )
        for scheduler_class in self.scheduler_classes:
            A_ : Tuple = self.get_scheduler_config()
            A_ : Optional[Any] = scheduler_class(**lowercase )
            A_ : List[str] = self.dummy_sample
            A_ : Union[str, Any] = 0.1 * sample
            if num_inference_steps is not None and hasattr(lowercase , """set_timesteps""" ):
                scheduler.set_timesteps(lowercase )
            elif num_inference_steps is not None and not hasattr(lowercase , """set_timesteps""" ):
                A_ : Tuple = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            A_ : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
            A_ : str = dummy_past_residuals[: scheduler.config.solver_order]
            A_ : Union[str, Any] = scheduler.timesteps[5]
            A_ : Dict = scheduler.timesteps[6]
            A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
            A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def _a (self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        A_ : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
        A_ : List[str] = self.full_loop(scheduler=lowercase )
        A_ : Any = torch.mean(torch.abs(lowercase ) )
        assert abs(result_mean.item() - 0.24_64 ) < 1E-3
        A_ : int = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        A_ : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
        A_ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        A_ : str = UniPCMultistepScheduler.from_config(scheduler.config )
        A_ : int = self.full_loop(scheduler=lowercase )
        A_ : List[Any] = torch.mean(torch.abs(lowercase ) )
        assert abs(result_mean.item() - 0.24_64 ) < 1E-3

    def _a (self ):
        # NOTE(review): ``lowercase`` is unbound here — the loop variable
        # ``timesteps`` was presumably meant.
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=lowercase )

    def _a (self ):
        # Sweep thresholding configurations; the loop variables were
        # presumably meant where ``lowercase`` now appears.
        self.check_over_configs(thresholding=lowercase )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=lowercase , prediction_type=lowercase , sample_max_value=lowercase , solver_order=lowercase , solver_type=lowercase , )

    def _a (self ):
        # Sweep prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=lowercase )

    def _a (self ):
        # Sweep solver types/orders and confirm the full loop stays finite.
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
                    A_ : Tuple = self.full_loop(
                        solver_order=lowercase , solver_type=lowercase , prediction_type=lowercase , )
                    assert not torch.isnan(lowercase ).any(), "Samples have nan numbers"

    def _a (self ):
        # NOTE(review): presumably one call with True and one with False.
        self.check_over_configs(lower_order_final=lowercase )
        self.check_over_configs(lower_order_final=lowercase )

    def _a (self ):
        # Sweep inference-step counts.
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=lowercase , time_step=0 )

    def _a (self ):
        # Regression check on the mean magnitude of a default full loop.
        A_ : Optional[Any] = self.full_loop()
        A_ : Optional[Any] = torch.mean(torch.abs(lowercase ) )
        assert abs(result_mean.item() - 0.24_64 ) < 1E-3

    def _a (self ):
        # Regression check for v-prediction.
        A_ : Optional[Any] = self.full_loop(prediction_type="""v_prediction""" )
        A_ : List[Any] = torch.mean(torch.abs(lowercase ) )
        assert abs(result_mean.item() - 0.10_14 ) < 1E-3

    def _a (self ):
        # Half-precision loop should keep the sample dtype.
        A_ : Any = self.scheduler_classes[0]
        A_ : Union[str, Any] = self.get_scheduler_config(thresholding=lowercase , dynamic_thresholding_ratio=0 )
        A_ : str = scheduler_class(**lowercase )
        A_ : Tuple = 10
        A_ : List[Any] = self.dummy_model()
        A_ : Tuple = self.dummy_sample_deter.half()
        scheduler.set_timesteps(lowercase )
        for i, t in enumerate(scheduler.timesteps ):
            A_ : Optional[int] = model(lowercase , lowercase )
            A_ : Tuple = scheduler.step(lowercase , lowercase , lowercase ).prev_sample
        # NOTE(review): ``torch.floataa`` does not exist; presumably
        # ``torch.float16`` was mangled.
        assert sample.dtype == torch.floataa

    def _a (self , **lowercase ):
        # With num_inference_steps == num_train_timesteps, every timestep
        # must be unique.
        for scheduler_class in self.scheduler_classes:
            A_ : Tuple = self.get_scheduler_config(**lowercase )
            A_ : Dict = scheduler_class(**lowercase )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
# Module-level logger for the conversion script.
lowerCamelCase :int = logging.get_logger('''transformers.models.encodec''')

# NOTE(review): every constant below was renamed to the same identifier
# ``lowerCamelCase`` by an automated pass, so each assignment clobbers the
# previous one, and the merged dicts near the bottom reference names
# (MAPPING_QUANTIZER, MAPPING_ENCODER, ...) that no longer exist at module
# level.  The intended name for each constant is given in a comment.

# MAPPING_QUANTIZER: original quantizer state-dict keys -> HF keys.
lowerCamelCase :int = {
    '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
    '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
    '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
    '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
# MAPPING_ENCODER: encoder conv/lstm keys shared by all model variants.
lowerCamelCase :List[str] = {
    '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
    '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
    '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
    '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
    '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
    '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
    '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
    '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
    '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
    '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
    '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
    '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
    '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
    '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
    '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
    '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
    '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
    '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
    '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
# MAPPING_ENCODER_48K: extra norm keys used only by the 48 kHz model.
lowerCamelCase :Union[str, Any] = {
    '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
    '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
    '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
    '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
    '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
    '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
    '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
    '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
    '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
    '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
    '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
    '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
    '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
    '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
    '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
    '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
    '''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
    '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
# MAPPING_DECODER: decoder conv/lstm keys shared by all model variants.
lowerCamelCase :Dict = {
    '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
    '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
    '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
    '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
    '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
    '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
    '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
    '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
    '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
    '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
    '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
    '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
    '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
    '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
    '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
    '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
    '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
    '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
    '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
# MAPPING_DECODER_48K: extra decoder norm keys used only by the 48 kHz model.
lowerCamelCase :int = {
    '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
    '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
    '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
    '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
    '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
    '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
    '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
    '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
    '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
    '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
    '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
    '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
    '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
    '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
    '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
    '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
    '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
    '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
# MAPPING_24K: combined mapping for the 24 kHz (and 32 kHz) checkpoints.
lowerCamelCase :str = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
# MAPPING_48K: combined mapping for the 48 kHz checkpoint (adds norm keys).
lowerCamelCase :List[Any] = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
# TOP_LEVEL_KEYS (presumably) — unused in the visible code.
lowerCamelCase :Tuple = []
# IGNORE_KEYS (presumably) — patterns of weights to skip during conversion.
lowerCamelCase :Dict = []
def a(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the tensor of ``hf_pointer`` addressed by ``key``.

    ``key`` is a dotted attribute path into the HF model; ``weight_type``
    names the final tensor attribute (``"weight"``, ``"bias"``,
    ``"weight_ih_l0"``, ``"running_mean"``, ...) or is ``None`` when the
    resolved attribute is itself the tensor.  Raises ``ValueError`` on a
    shape mismatch.

    NOTE(review): the original block declared the same parameter name five
    times (a SyntaxError) and every branch of its weight_type chain assigned
    ``value`` to a dead local instead of the model tensor.  All branches were
    identical, so they are collapsed into one generic ``getattr`` assignment.
    """
    # Walk the dotted path down to the target submodule / parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape check against the destination tensor.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )

    # Write the checkpoint tensor into the model in place.
    if weight_type is not None:
        getattr(hf_pointer, weight_type).data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def a(name, ignore_keys):
    """Return True if checkpoint key ``name`` matches any ignore pattern.

    A pattern ending in ``.*`` matches any name starting with its prefix;
    a pattern containing ``.*.`` matches when both its prefix and suffix
    occur in ``name``; any other pattern matches as a plain substring.

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError) and dropped the ``prefix, suffix`` unpack into a dead
    local; this restores the evident intent.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            # Trailing wildcard: keep the dot, strip only the '*'.
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def a(orig_dict, hf_model, model_name):
    """Load every tensor of an original Encodec state dict into ``hf_model``.

    Keys are translated through the variant-specific mapping, wildcard layer
    indices are resolved, and the tensor attribute type is inferred from the
    key name before delegating to ``set_recursively``.  Unmatched keys are
    collected and logged as a warning.

    NOTE(review): the original signature declared one parameter name three
    times (a SyntaxError); parameter names are restored from how the body
    uses them.  ``MAPPING_24K``/``MAPPING_48K``, ``IGNORE_KEYS``, ``logger``
    and the helpers ``should_ignore``/``set_recursively`` are module-level
    names whose definitions above were mangled by the same renaming pass.
    """
    unused_weights = []
    # NOTE(review): the original condition was
    # ``model_name == "encodec_24khz" or "encodec_32khz"`` — always truthy,
    # so the 48 kHz branch was unreachable.  Use a membership test.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        mapping = MAPPING_24K
    elif model_name == "encodec_48khz":
        mapping = MAPPING_48K
    else:
        raise ValueError(f'Unsupported model: {model_name}' )
    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored' )
            continue
        is_used = False
        for key, mapped_key in mapping.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    # Wildcard source key: match on the suffix from here on.
                    key = suffix
            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue
                is_used = True
                if "*" in mapped_key:
                    # Recover the layer index from the original key path.
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                # Infer which tensor attribute of the target module to fill.
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}' )
@torch.no_grad()
def a(model_name, checkpoint_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original Encodec checkpoint to a HF ``EncodecModel``.

    Builds the variant-specific ``EncodecConfig``, loads the original state
    dict from ``checkpoint_path``, maps its weights into the HF model, and
    saves model + feature extractor to ``pytorch_dump_folder_path``
    (optionally pushing both to ``repo_id`` on the Hub).

    NOTE(review): the original signature declared one parameter name five
    times (a SyntaxError), and the per-variant config assignments went to a
    dead local.  The destination config attributes below were reconstructed
    from the assigned values and the EncodecConfig API — confirm against the
    upstream conversion script.  The call to ``recursively_load_weights``
    matches the original; that helper's definition above was mangled.
    """
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()
    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f'Unknown model name: {model_name}' )
    model = EncodecModel(config)
    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["""best_state"""]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("""Pushing to the hub...""" )
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the checkpoint
    # conversion.  NOTE(review): ``convert_checkpoint`` is not defined at
    # module level — the functions above were all renamed to ``a`` by the
    # same pass that mangled the rest of this script.
    lowerCamelCase :Any = argparse.ArgumentParser()
    parser.add_argument(
        '''--model''',
        default='''encodec_24khz''',
        type=str,
        help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
    )
    parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
    )
    lowerCamelCase :Dict = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the char-level SentencePiece fixture used by the tokenizer tests.
A : List[str] = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''')
@require_sentencepiece
@require_tokenizers
class A (SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Any = SpeechTaTokenizer
__lowerCamelCase : str = False
__lowerCamelCase : int = True
    def a_ ( self : Dict ) -> Any:
        """Create a SpeechT5 tokenizer from the SentencePiece fixture,
        register <mask> and <ctc_blank>, and save it into ``tmpdirname``.

        NOTE(review): ``__lowerCAmelCase`` is unbound in this method — the
        fixture path constant and the True/False lstrip/rstrip flags were
        presumably mangled into it.
        """
        super().setUp()
        # We have a SentencePiece fixture for testing
        A__ = SpeechTaTokenizer(__lowerCAmelCase )
        A__ = AddedToken("""<mask>""" , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )
        A__ = mask_token
        tokenizer.add_special_tokens({"""mask_token""": mask_token} )
        tokenizer.add_tokens(["""<ctc_blank>"""] )
        tokenizer.save_pretrained(self.tmpdirname )
    def a_ ( self : Union[str, Any] , __lowerCAmelCase : List[str] ) -> Any:
        """Return the (input_text, expected_output_text) pair used by
        round-trip encode/decode checks; both are the same fixed string."""
        A__ = """this is a test"""
        A__ = """this is a test"""
        return input_text, output_text
    def a_ ( self : Optional[int] , __lowerCAmelCase : List[str] , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Optional[Any]=20 , __lowerCAmelCase : List[Any]=5 ) -> Any:
        """Encode/decode the sample text and return (text, ids).

        NOTE(review): the signature declares ``__lowerCAmelCase`` four times —
        a SyntaxError; the original parameters were presumably
        (tokenizer, with_prefix_space, max_length, min_length).
        """
        A__ , A__ = self.get_input_output_texts(__lowerCAmelCase )
        A__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
        A__ = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
        return text, ids
    def a_ ( self : List[str] ) -> Any:
        """``<pad>`` must map to id 1 and back.

        NOTE(review): the token/id pair was mangled into dead ``A__`` locals
        and the assertions read unbound ``__lowerCAmelCase``.
        """
        A__ = """<pad>"""
        A__ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
    def a_ ( self : Optional[int] ) -> Dict:
        """Spot-check the vocabulary: special tokens at the expected
        positions and a total size of 81 entries."""
        A__ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-4] , """œ""" )
        self.assertEqual(vocab_keys[-2] , """<mask>""" )
        self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
        self.assertEqual(len(__lowerCAmelCase ) , 81 )
    def a_ ( self : Union[str, Any] ) -> Optional[Any]:
        """Base vocab size (without the added tokens) must be 79."""
        self.assertEqual(self.get_tokenizer().vocab_size , 79 )
    def a_ ( self : Any ) -> Optional[Any]:
        """Adding regular and special tokens must grow the vocab and make the
        new tokens encodable (their ids lie beyond the base vocab).

        NOTE(review): vocab-size snapshots were mangled into the single dead
        local ``A__``, so the comparisons below read unbound names at
        runtime.
        """
        A__ = self.get_tokenizers(do_lower_case=__lowerCAmelCase )
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}' ):
                A__ = tokenizer.vocab_size
                A__ = len(__lowerCAmelCase )
                self.assertNotEqual(__lowerCAmelCase , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                A__ = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
                A__ = tokenizer.add_tokens(__lowerCAmelCase )
                A__ = tokenizer.vocab_size
                A__ = len(__lowerCAmelCase )
                self.assertNotEqual(__lowerCAmelCase , 0 )
                self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
                self.assertEqual(__lowerCAmelCase , all_size + len(__lowerCAmelCase ) )
                A__ = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=__lowerCAmelCase )
                self.assertGreaterEqual(len(__lowerCAmelCase ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                A__ = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
                A__ = tokenizer.add_special_tokens(__lowerCAmelCase )
                A__ = tokenizer.vocab_size
                A__ = len(__lowerCAmelCase )
                self.assertNotEqual(__lowerCAmelCase , 0 )
                self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
                self.assertEqual(__lowerCAmelCase , len(__lowerCAmelCase ) )
                self.assertEqual(__lowerCAmelCase , all_size_a + len(__lowerCAmelCase ) )
                A__ = tokenizer.encode(
                    """>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=__lowerCAmelCase )
                self.assertGreaterEqual(len(__lowerCAmelCase ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    def a_ ( self : str ) -> int:
        """Intentionally empty: test not applicable to this tokenizer."""
        pass
    def a_ ( self : int ) -> int:
        """Intentionally empty: test not applicable to this tokenizer."""
        pass
    def a_ ( self : Union[str, Any] ) -> List[str]:
        """Full tokenizer check: char-level tokenization, token<->id
        conversion, and <unk> substitution for out-of-vocab pieces."""
        A__ = self.get_tokenizer()
        A__ = tokenizer.tokenize("""This is a test""" )
        # fmt: off
        self.assertListEqual(__lowerCAmelCase , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
        # fmt: on
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
        A__ = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __lowerCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
        A__ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
        # fmt: off
        self.assertListEqual(__lowerCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
        # fmt: on
        # "92000" is out of vocab, so converting ids back yields <unk>.
        A__ = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
        self.assertListEqual(
            __lowerCAmelCase , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def a_ ( self : str ) -> int:
"""simple docstring"""
A__ = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
A__ = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=__lowerCAmelCase , )
| 176 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
# Tensor framework used for `return_tensors=` in the tests below: prefer
# PyTorch, then TensorFlow, falling back to JAX when neither is installed.
# (The constant was previously assigned to `A` but is read as `FRAMEWORK`
# at its call site, so the intended name is restored here.)
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class A(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the byte-level ByT5 tokenizer.

    NOTE(review): the base mixin was previously an undefined name; the
    otherwise-unused `TokenizerTesterMixin` import above is clearly the
    intended base and is restored here.  Method names were previously all
    `a_`, shadowing one another; they are restored from the mixin hooks
    the class overrides and the attributes/methods its own bodies call.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        """Create a fresh ByT5 tokenizer and persist it for reload-based tests."""
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self) -> ByTaTokenizer:
        """Reference tokenizer loaded from the published google/byt5-small checkpoint."""
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        """Reload the tokenizer saved in `setUp`, forwarding any extra kwargs."""
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        """Build a (text, ids) pair of printable ASCII tokens that round-trips through `tokenizer`."""
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))
        # Keep only tokens that decode to plain ASCII letters/spaces ...
        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        # ... and that re-encode to exactly their own single id.
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        """Appending `</s>` explicitly must match the tokenizer's automatic EOS handling."""
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        """Multi-byte UTF-8 characters must encode to their byte ids and decode back."""
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")
        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        """Padding a two-sentence batch yields the expected ids/shapes as framework tensors."""
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        """Without a text target, no decoder inputs should be produced."""
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length(self):
        """`padding="max_length"` with truncation must pad/trim targets to `max_length`."""
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        """An explicit `</s>` in the raw text must survive encoding of source and target."""
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        """Saving and reloading a tokenizer (incl. added tokens) must preserve its behavior."""
        # Safety check on model_max_length's default value so the test below is meaningful.
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """`additional_special_tokens` from saved config files and from kwargs must both be honored."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]
                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )
                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        """A lone continuation byte (255) cannot form a character and must decode to ''."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        """`convert_tokens_to_string` must return a plain `str` for byte tokens."""
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # Overridden because this tokenizer has no vocabulary: exercise the
    # special-token id setters against raw byte id 0.
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 176 | 1 |
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# `cookiecutter` is an optional dependency: record its availability so `run`
# can raise a helpful ImportError at call time instead of failing at import.
# (Both names below were previously collapsed into one `_snake_case` binding;
# `_has_cookiecutter` is the name the command class actually reads.)
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def __lowerCamelCase(args: Namespace) -> "AddNewModelCommand":
    """argparse `func=` factory: build an AddNewModelCommand from parsed CLI args.

    The parameter was previously misnamed while the body read `args`; the
    grounded name is restored.
    """
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    """CLI command `transformers-cli add-new-model` (deprecated).

    Runs the `adding_a_new_model` cookiecutter template and distributes the
    generated files into the transformers source tree.  The class is named
    `AddNewModelCommand` because that is the name the module-level factory
    above instantiates; the base class is the otherwise-unused
    `BaseTransformersCLICommand` import.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `add-new-model` sub-parser to the root CLI parser."""
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        # Build the command directly from the parsed args (equivalent to the
        # module-level factory; inlined so this class is self-contained).
        add_new_model_parser.set_defaults(
            func=lambda args: AddNewModelCommand(args.testing, args.testing_file, path=args.path)
        )

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        # Parameter names are grounded by the original body's reads of
        # `testing`, `testing_file` and `path`; the stored attributes are the
        # ones `run` dereferences (`self._testing`, etc.).
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
        """Execute the cookiecutter template and move the generated files into place."""
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"
        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )
        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]
        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)
        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")
        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax
        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)
        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass
        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )

        def remove_copy_lines(path):
            # Strip `# Copied from transformers.` markers from a generated file.
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_{lowercase_model_name}.py",
                f"{model_dir}/modeling_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_{lowercase_model_name}.py")
        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_tf_{lowercase_model_name}.py",
                f"{model_dir}/modeling_tf_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_tf_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_tf_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_tf_{lowercase_model_name}.py")
        if output_flax:
            if not self._testing:
                remove_copy_lines(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            shutil.move(
                f"{directory}/modeling_flax_{lowercase_model_name}.py",
                f"{model_dir}/modeling_flax_{lowercase_model_name}.py",
            )
            shutil.move(
                f"{directory}/test_modeling_flax_{lowercase_model_name}.py",
                f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py",
            )
        else:
            os.remove(f"{directory}/modeling_flax_{lowercase_model_name}.py")
            os.remove(f"{directory}/test_modeling_flax_{lowercase_model_name}.py")
        shutil.move(
            f"{directory}/{lowercase_model_name}.md",
            f"{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md",
        )
        shutil.move(
            f"{directory}/tokenization_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}.py",
        )
        shutil.move(
            f"{directory}/tokenization_fast_{lowercase_model_name}.py",
            f"{model_dir}/tokenization_{lowercase_model_name}_fast.py",
        )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Insert `lines_to_copy` below the marker line inside `original_file`.
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            # Skip snippets targeting a framework the user did not request.
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            # Parse the generated `to_replace_*.py` directive file and apply
            # each snippet to its target file.
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 170 |
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30) -> float:
    """Approximate sin(theta) with a Maclaurin series of `accuracy` terms.

    The angle is first reduced modulo 2*pi so the series converges quickly.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms to sum (positive int).

    Raises:
        ValueError: if `theta` is not a real number or `accuracy` is not a
            positive int.
    """
    # The original definition repeated one placeholder name for both
    # parameters (a SyntaxError); the intended names are restored from the
    # function body and its error messages.
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Range reduction: bring theta into [0, 2*pi) before summing the series.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta, accuracy=30) -> float:
    """Approximate cos(theta) with a Maclaurin series of `accuracy` terms.

    The angle is first reduced modulo 2*pi so the series converges quickly.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms to sum (positive int).

    Raises:
        ValueError: if `theta` is not a real number or `accuracy` is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Range reduction: bring theta into [0, 2*pi) before summing the series.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: print a few approximations (relies on `maclaurin_sin` /
    # `maclaurin_cos` being defined above under exactly those names).
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 170 | 1 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowercase__ = TypeVar("KEY")
lowercase__ = TypeVar("VAL")
@dataclass(frozen=__snake_case , slots=__snake_case )
class SCREAMING_SNAKE_CASE__ ( Generic[KEY, VAL] ):
_lowerCAmelCase = 42
_lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE__ ( _Item ):
def __init__(self ):
'''simple docstring'''
super().__init__(_lowercase , _lowercase )
def __bool__(self ):
'''simple docstring'''
return False
lowercase__ = _DeletedItem()
class SCREAMING_SNAKE_CASE__(MutableMapping[KEY, VAL]):
    """Hash map with open addressing (linear probing) and tombstone deletion.

    The table grows (doubles) once the load factor reaches `capacity_factor`
    and shrinks (halves) when it falls below half of it.  Helper-method names
    were previously all identical placeholders; they are restored from the
    names the method bodies themselves call (`self._try_set`, `self._resize`,
    ...), and the duplicated parameter placeholders (a SyntaxError) are given
    their real names.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        """Home bucket for `key`."""
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        """Linear probing: next bucket index, wrapping around the table."""
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Place (key, val) at bucket `ind`; False if held by a different key."""
        stored = self._buckets[ind]
        if not stored:
            # Empty slot or tombstone (falsy): claim it.
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            # Same key: overwrite in place, size unchanged.
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        # Never shrink below the initial capacity.
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        """Rebuild the table at `new_size`, re-inserting all live items."""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        """Yield the probe sequence for `key`, at most one full table sweep."""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                # Probe chain ended without finding the key.
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                # Leave a tombstone so later probes keep walking past it.
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 581 |
"""simple docstring"""
from __future__ import annotations
import math
def __magic_name__ ( u : float , p : int ):
    """Return the product ``u * (u - 1) * ... * (u - p + 1)``.

    This is the falling-factorial coefficient used by Newton's
    forward-difference interpolation formula; for ``p <= 1`` the bare ``u``
    is returned unchanged.

    Fix: the obfuscated original declared both parameters as
    ``_lowerCamelCase`` (a SyntaxError — duplicate argument name) and the
    body referenced the undefined names ``u`` and ``temp``.
    """
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def __magic_name__ ( ):
    """Interactive driver for Newton's forward-difference interpolation.

    Reads a sample table from stdin, builds the forward-difference table,
    then prints the value interpolated at the requested point.

    NOTE(review): every assignment below binds a throwaway ``__a`` while later
    lines read ``y``, ``x``, ``n``, ``value``, ``u``, ``summ`` and call
    ``ucal`` — none of which are defined in this obfuscated copy, so running
    it raises NameError; the original left-hand names need restoring.
    """
    __a : Dict = int(input("""enter the numbers of values: """ ) )
    __a : list[list[float]] = []
    for _ in range(_lowerCamelCase ):
        y.append([] )
    for i in range(_lowerCamelCase ):
        for j in range(_lowerCamelCase ):
            y[i].append(_lowerCamelCase )
    __a : Any = 0
    print("""enter the values of parameters in a list: """ )
    __a : List[str] = list(map(_lowerCamelCase , input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(_lowerCamelCase ):
        __a : str = float(input() )
    __a : List[Any] = int(input("""enter the value to interpolate: """ ) )
    # u: offset of the target point from x[0] in units of the sample spacing.
    __a : Union[str, Any] = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , _lowerCamelCase ):
        for j in range(n - i ):
            __a : List[str] = y[j + 1][i - 1] - y[j][i - 1]
    __a : str = y[0][0]
    for i in range(1 , _lowerCamelCase ):
        summ += (ucal(_lowerCamelCase , _lowerCamelCase ) * y[0][i]) / math.factorial(_lowerCamelCase )
    print(F'''the value at {value} is {summ}''' )
# Script entry point.
# NOTE(review): `main` is not defined in this obfuscated copy (the driver
# above is named `__magic_name__`), so running the module raises NameError.
if __name__ == "__main__":
    main()
| 581 | 1 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase =logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( box , width , height ):
    """Scale an absolute ``(left, top, right, bottom)`` pixel box to the
    0-1000 coordinate grid used by LayoutLM-style models.

    Fix: the obfuscated original declared all three parameters with the same
    name (a SyntaxError — duplicate argument name) while the body read the
    undefined names ``box``/``width``/``height``.
    """
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
def SCREAMING_SNAKE_CASE_ ( image , lang , tesseract_config ):
    """Run Tesseract OCR on ``image``.

    Returns ``(words, normalized_boxes)`` where each box is the word's
    ``(left, top, right, bottom)`` rectangle scaled to the 0-1000 grid.

    Fixes: the obfuscated original repeated one parameter name three times
    (a SyntaxError) and every local was bound to a throwaway name while later
    lines read the intended names; parameters here follow the positional call
    order used at the call site: image, OCR language, tesseract config.
    """
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
    # filter empty words and corresponding coordinates; a set keeps the
    # membership tests below O(1) instead of O(n) per element
    irrelevant_indices = {idx for idx, word in enumerate(words ) if not word.strip()}
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_boxes.append([x, y, x + w, y + h] )
    # finally, normalize the bounding boxes to the 0-1000 grid (inlined here
    # because the module-level helper's obfuscated name is shadowed by this
    # function itself)
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(
            [
                int(1_0_0_0 * (box[0] / image_width) ),
                int(1_0_0_0 * (box[1] / image_height) ),
                int(1_0_0_0 * (box[2] / image_width) ),
                int(1_0_0_0 * (box[3] / image_height) ),
            ] )
    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class _lowerCamelCase ( UpperCamelCase_ ):
    """LayoutLM-style image processor: optionally resizes, rescales and
    normalizes document images, and can run Tesseract OCR to produce words
    plus 0-1000-normalized bounding boxes.

    NOTE(review): every method below declares several parameters all named
    ``__SCREAMING_SNAKE_CASE`` — duplicate argument names are a SyntaxError
    in Python, so this obfuscated copy cannot be imported as-is; the
    signatures need distinct parameter names restored.
    """
    # Key(s) this processor emits in its BatchFeature output.
    SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
    def __init__( self , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = 1 / 2_5_5 , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "" , **__SCREAMING_SNAKE_CASE , ) -> None:
        """Store the default preprocessing configuration (resize/rescale/
        normalize flags, target size, ImageNet mean/std fallbacks, OCR
        settings)."""
        super().__init__(**__SCREAMING_SNAKE_CASE )
        # Default target size is 224x224 when none is supplied.
        UpperCamelCase__ : Optional[Any] = size if size is not None else {'''height''': 2_2_4, '''width''': 2_2_4}
        UpperCamelCase__ : Optional[int] = get_size_dict(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = do_resize
        UpperCamelCase__ : Optional[int] = size
        UpperCamelCase__ : Optional[Any] = resample
        UpperCamelCase__ : Optional[Any] = do_rescale
        UpperCamelCase__ : str = rescale_value
        UpperCamelCase__ : Any = do_normalize
        # Fall back to ImageNet statistics when no mean/std is given.
        UpperCamelCase__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        UpperCamelCase__ : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
        UpperCamelCase__ : Dict = apply_ocr
        UpperCamelCase__ : List[Any] = ocr_lang
        UpperCamelCase__ : str = tesseract_config
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
        """Resize an image to the ``{'height', 'width'}`` given in `size`."""
        UpperCamelCase__ : Union[str, Any] = get_size_dict(__SCREAMING_SNAKE_CASE )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
        UpperCamelCase__ : int = (size['''height'''], size['''width'''])
        return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
        """Multiply pixel values by a scale factor (e.g. 1/255)."""
        return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE , ) -> PIL.Image.Image:
        """Full preprocessing pipeline: validate inputs, optionally run OCR,
        then resize/rescale/normalize and pack a BatchFeature (words/boxes
        attached when OCR ran)."""
        # Per-call arguments override the defaults captured in __init__.
        UpperCamelCase__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase__ : List[str] = size if size is not None else self.size
        UpperCamelCase__ : Dict = get_size_dict(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : List[str] = resample if resample is not None else self.resample
        UpperCamelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale
        UpperCamelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCamelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize
        UpperCamelCase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
        UpperCamelCase__ : Tuple = image_std if image_std is not None else self.image_std
        UpperCamelCase__ : Optional[Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
        UpperCamelCase__ : List[str] = ocr_lang if ocr_lang is not None else self.ocr_lang
        UpperCamelCase__ : List[Any] = tesseract_config if tesseract_config is not None else self.tesseract_config
        UpperCamelCase__ : List[Any] = make_list_of_images(__SCREAMING_SNAKE_CASE )
        if not valid_images(__SCREAMING_SNAKE_CASE ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        UpperCamelCase__ : List[Any] = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            UpperCamelCase__ : Optional[int] = []
            UpperCamelCase__ : List[Any] = []
            for image in images:
                UpperCamelCase__ ,UpperCamelCase__ : Tuple = apply_tesseract(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                words_batch.append(__SCREAMING_SNAKE_CASE )
                boxes_batch.append(__SCREAMING_SNAKE_CASE )
        if do_resize:
            UpperCamelCase__ : str = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images]
        if do_rescale:
            UpperCamelCase__ : List[Any] = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images]
        if do_normalize:
            UpperCamelCase__ : str = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images]
        UpperCamelCase__ : int = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images]
        UpperCamelCase__ : List[Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=__SCREAMING_SNAKE_CASE )
        if apply_ocr:
            UpperCamelCase__ : str = words_batch
            UpperCamelCase__ : Union[str, Any] = boxes_batch
        return data
| 462 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _lowerCamelCase :
    """Test helper that builds a tiny DeiT configuration plus random inputs
    and runs shape checks for the TF DeiT model variants.

    NOTE(review): `__init__` declares many parameters all named
    ``__SCREAMING_SNAKE_CASE`` — duplicate argument names are a SyntaxError
    in Python — and its body assigns results to ``UpperCamelCase__`` while
    reading the intended attribute names; the signature and left-hand names
    need restoring for this copy to run.
    """
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1_3 , __SCREAMING_SNAKE_CASE=3_0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=3_2 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=3_7 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1_0 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=2 , ) -> Union[str, Any]:
        """Record the miniature model hyper-parameters used by the tests."""
        UpperCamelCase__ : List[Any] = parent
        UpperCamelCase__ : Optional[int] = batch_size
        UpperCamelCase__ : Optional[Any] = image_size
        UpperCamelCase__ : Optional[int] = patch_size
        UpperCamelCase__ : Any = num_channels
        UpperCamelCase__ : Optional[Any] = is_training
        UpperCamelCase__ : Dict = use_labels
        UpperCamelCase__ : Optional[int] = hidden_size
        UpperCamelCase__ : str = num_hidden_layers
        UpperCamelCase__ : int = num_attention_heads
        UpperCamelCase__ : List[Any] = intermediate_size
        UpperCamelCase__ : int = hidden_act
        UpperCamelCase__ : int = hidden_dropout_prob
        UpperCamelCase__ : Optional[Any] = attention_probs_dropout_prob
        UpperCamelCase__ : Tuple = type_sequence_label_size
        UpperCamelCase__ : Optional[int] = initializer_range
        UpperCamelCase__ : List[str] = scope
        UpperCamelCase__ : str = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        UpperCamelCase__ : Any = (image_size // patch_size) ** 2
        UpperCamelCase__ : Any = num_patches + 2
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Build random pixel values (and labels, when enabled) plus a config."""
        UpperCamelCase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCamelCase__ : Dict = None
        if self.use_labels:
            UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCamelCase__ : List[Any] = self.get_config()
        return config, pixel_values, labels
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        """Return a DeiTConfig populated from the recorded hyper-parameters."""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
        """Check the base model returns a (batch, seq, hidden) last state."""
        UpperCamelCase__ : Tuple = TFDeiTModel(config=__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[int] = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
        """Check masked-image-modeling reconstruction shapes (RGB and greyscale)."""
        UpperCamelCase__ : str = TFDeiTForMaskedImageModeling(config=__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : List[str] = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        UpperCamelCase__ : Optional[int] = 1
        UpperCamelCase__ : Any = TFDeiTForMaskedImageModeling(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase__ : Any = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """Check classification logits shapes (RGB and greyscale)."""
        UpperCamelCase__ : Optional[Any] = self.type_sequence_label_size
        UpperCamelCase__ : Optional[Any] = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCamelCase__ : int = 1
        UpperCamelCase__ : int = TFDeiTForImageClassification(__SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCamelCase__ : Optional[Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Prepare (config, inputs_dict) for the common test mixin."""
        UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
        UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Any = config_and_inputs
        UpperCamelCase__ : Optional[int] = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Common + pipeline test-suite bindings for the TF DeiT model family.

    NOTE(review): several attributes are assigned to ``UpperCamelCase__``
    while later statements read the intended names (``self.model_tester``
    etc.); the obfuscated copy cannot run until those names are restored.
    """
    # All TF DeiT heads exercised by the common tests (when TF is installed).
    SCREAMING_SNAKE_CASE_ = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    # Mapping used by the pipeline test mixin.
    SCREAMING_SNAKE_CASE_ = (
        {
            '''feature-extraction''': TFDeiTModel,
            '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    SCREAMING_SNAKE_CASE_ = False
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """Create the model tester and config tester fixtures."""
        UpperCamelCase__ : List[Any] = TFDeiTModelTester(self )
        UpperCamelCase__ : str = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=3_7 )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''DeiT does not use inputs_embeds''' )
    def __SCREAMING_SNAKE_CASE ( self ) -> str:
        """Skipped: DeiT consumes pixel values, not input embeddings."""
        pass
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        """Input embeddings are a Keras layer; output embeddings are Dense/None."""
        UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ : List[Any] = model_class(__SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            UpperCamelCase__ : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Every model's forward signature starts with `pixel_values`."""
        UpperCamelCase__ ,UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase__ : Optional[Any] = model_class(__SCREAMING_SNAKE_CASE )
            UpperCamelCase__ : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase__ : str = [*signature.parameters.keys()]
            UpperCamelCase__ : int = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Shape-check the base model."""
        UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> Any:
        """Shape-check the masked-image-modeling head."""
        UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        """Shape-check the classification head."""
        UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
    def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> str:
        """Drop `labels` for models whose call signature does not accept them
        (e.g. the with-teacher classification head)."""
        UpperCamelCase__ : Optional[Any] = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase__ : Any = TFDeiTModel.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( ):
    """Load the COCO test image (two cats) used by the integration tests.

    Fix: the obfuscated original assigned the opened image to a throwaway
    name and then returned the undefined name ``image``.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
    """Slow integration test: run the distilled DeiT classifier on a real
    image and compare logits against recorded reference values."""
    @cached_property
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        """Image processor matching the pretrained checkpoint (None w/o vision)."""
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
            if is_vision_available()
            else None
        )
    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
        """End-to-end forward pass; checks logits shape and first values.

        NOTE(review): results are bound to ``UpperCamelCase__`` while later
        lines read ``inputs``/``outputs``, so this obfuscated copy raises
        NameError when executed."""
        UpperCamelCase__ : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
        UpperCamelCase__ : str = self.default_image_processor
        UpperCamelCase__ : List[str] = prepare_img()
        UpperCamelCase__ : int = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''tf''' )
        # forward pass
        UpperCamelCase__ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
        # verify the logits
        UpperCamelCase__ : Union[str, Any] = tf.TensorShape((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
        UpperCamelCase__ : List[str] = tf.constant([-1.0266, 0.1912, -1.2861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 462 | 1 |
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowercase = logging.get_logger(__name__)
class _lowercase :
    """Composite RAG tokenizer wrapping a question-encoder tokenizer and a
    generator tokenizer, delegating calls to whichever is "current".

    NOTE(review): most methods below are all named ``UpperCamelCase`` — in a
    Python class body later defs overwrite earlier ones, so only the last one
    survives; and ``__init__`` declares both parameters as ``A__`` (duplicate
    argument names are a SyntaxError). The obfuscated copy cannot work until
    the original method/parameter names are restored.
    """
    def __init__( self , A__ , A__ ) -> Tuple:
        # Stores both tokenizers; the question encoder starts as current.
        snake_case = question_encoder
        snake_case = generator
        snake_case = self.question_encoder
    def UpperCamelCase ( self , A__ ) -> int:
        """Save both sub-tokenizers into dedicated sub-folders of a directory."""
        if os.path.isfile(A__ ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(A__ , exist_ok=A__ )
        snake_case = os.path.join(A__ , '''question_encoder_tokenizer''' )
        snake_case = os.path.join(A__ , '''generator_tokenizer''' )
        self.question_encoder.save_pretrained(A__ )
        self.generator.save_pretrained(A__ )
    @classmethod
    def UpperCamelCase ( cls , A__ , **A__ ) -> List[Any]:
        """Alternate constructor: load both sub-tokenizers from a pretrained
        RAG checkpoint (config resolved automatically when not supplied)."""
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        snake_case = kwargs.pop('''config''' , A__ )
        if config is None:
            snake_case = RagConfig.from_pretrained(A__ )
        snake_case = AutoTokenizer.from_pretrained(
            A__ , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
        snake_case = AutoTokenizer.from_pretrained(
            A__ , config=config.generator , subfolder='''generator_tokenizer''' )
        return cls(question_encoder=A__ , generator=A__ )
    def __call__( self , *A__ , **A__ ) -> Any:
        """Tokenize with whichever tokenizer is currently selected."""
        return self.current_tokenizer(*A__ , **A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> Tuple:
        """Delegate batch decoding to the generator tokenizer."""
        return self.generator.batch_decode(*A__ , **A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> Tuple:
        """Delegate single-sequence decoding to the generator tokenizer."""
        return self.generator.decode(*A__ , **A__ )
    def UpperCamelCase ( self ) -> Optional[Any]:
        """Select the question-encoder tokenizer as current."""
        snake_case = self.question_encoder
    def UpperCamelCase ( self ) -> str:
        """Select the generator tokenizer as current."""
        snake_case = self.generator
    def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , A__ = "longest" , A__ = None , A__ = True , **A__ , ) -> BatchEncoding:
        """Deprecated seq2seq batch preparation: encodes sources (and targets
        when given) and attaches target ids under ``labels``."""
        warnings.warn(
            '''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
            '''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
            '''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
            '''details''' , A__ , )
        if max_length is None:
            snake_case = self.current_tokenizer.model_max_length
        snake_case = self(
            A__ , add_special_tokens=A__ , return_tensors=A__ , max_length=A__ , padding=A__ , truncation=A__ , **A__ , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            snake_case = self.current_tokenizer.model_max_length
        snake_case = self(
            text_target=A__ , add_special_tokens=A__ , return_tensors=A__ , padding=A__ , max_length=A__ , truncation=A__ , **A__ , )
        snake_case = labels['''input_ids''']
        return model_inputs
| 342 |
'''simple docstring'''
def __UpperCamelCase ( set_a , set_b , alternative_union=False ):
    """Jaccard similarity |A ∩ B| / |A ∪ B| of two collections.

    Accepts either two ``set`` objects or two list/tuple sequences; with
    ``alternative_union=True`` the denominator is ``len(a) + len(b)`` instead
    of the true union size.  Returns ``None`` for unsupported input types.

    Fixes over the obfuscated original: the signature declared the same
    parameter name three times (a SyntaxError — duplicate argument name),
    intermediates were bound to throwaway names, and an unreachable trailing
    ``return`` was dropped.
    """
    if isinstance(set_a , set ) and isinstance(set_b , set ):
        intersection = len(set_a.intersection(set_b ) )
        if alternative_union:
            union = len(set_a ) + len(set_b )
        else:
            union = len(set_a.union(set_b ) )
        return intersection / union
    if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
        # sequence variant keeps duplicate elements, mirroring the original
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a ) + len(set_b )
            return len(intersection ) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection ) / len(union )
    return None
# Demo: print the similarity of two overlapping example sets.
# NOTE(review): both sets are bound to the same obfuscated name, and the
# printed call references `jaccard_similarity`/`set_a`/`set_b`, none of which
# exist in this copy — running the demo raises NameError.
if __name__ == "__main__":
    _lowercase = {'a', 'b', 'c', 'd', 'e'}
    _lowercase = {'c', 'd', 'e', 'f', 'h', 'i'}
    print(jaccard_similarity(set_a, set_b))
| 342 | 1 |
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
    """Variance-preserving SDE scheduler: integrates the reverse-time SDE one
    Euler-Maruyama step at a time from a model's score estimate.

    NOTE(review): ``__init__`` and the step method declare several parameters
    all named ``_A`` (duplicate argument names are a SyntaxError), and most
    assignments bind a throwaway ``A__`` while later lines read the intended
    names (``self.timesteps``, ``x``, ``score``, ...); the obfuscated copy
    cannot run until those are restored.
    """
    # Scheduler order (number of model evaluations per step).
    __A : Tuple = 1
    @register_to_config
    def __init__( self : Tuple , _A : List[str]=2000 , _A : List[Any]=0.1 , _A : List[str]=20 , _A : Any=1e-3):
        A__ : List[str] = None
        A__ : Tuple = None
        A__ : Union[str, Any] = None
    def _lowercase ( self : Dict , _A : Optional[int] , _A : Union[str, torch.device] = None):
        """Build the (1 -> sampling_eps) timestep grid on the given device."""
        A__ : List[str] = torch.linspace(1 , self.config.sampling_eps , _A , device=_A)
    def _lowercase ( self : Optional[Any] , _A : Union[str, Any] , _A : Dict , _A : List[str] , _A : Union[str, Any]=None):
        """One reverse-SDE Euler-Maruyama step; returns (x, x_mean)."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        A__ : str = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        A__ : Optional[int] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        A__ : int = std.flatten()
        # Broadcast std to the score's rank before dividing.
        while len(std.shape) < len(score.shape):
            A__ : Optional[int] = std.unsqueeze(-1)
        A__ : Dict = -score / std
        # compute
        A__ : Union[str, Any] = -1.0 / len(self.timesteps)
        A__ : Optional[Any] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        A__ : Any = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            A__ : List[str] = beta_t.unsqueeze(-1)
        A__ : Union[str, Any] = -0.5 * beta_t * x
        A__ : Tuple = torch.sqrt(_A)
        A__ : str = drift - diffusion**2 * score
        A__ : Tuple = x + drift * dt
        # add noise
        A__ : Tuple = randn_tensor(x.shape , layout=x.layout , generator=_A , device=x.device , dtype=x.dtype)
        A__ : Tuple = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean
    def __len__( self : Tuple):
        # NOTE(review): the trailing "| 714 |" below is pasted table residue
        # fused onto the line (it makes the module a SyntaxError); the line
        # should read `return self.config.num_train_timesteps`.
        return self.config.num_train_timesteps | 714 |
def snake_case__ ( list_data , key , left = 0 , right = 0 ) -> int:
    """Recursive two-ended linear search.

    Checks the leftmost and rightmost candidates each call, then recurses
    inward; returns the index of ``key`` in ``list_data`` or ``-1`` when
    absent (including for an empty list).

    Fixes over the obfuscated original: all four parameters shared one name
    (a SyntaxError — duplicate argument name), the ``right``-boundary
    rebinding was lost to a throwaway ``A__``, and the recursive call used an
    undefined name.
    """
    # A right of 0 (the default) means "search the whole list".
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return snake_case__(list_data , key , left + 1 , right - 1 )
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    # NOTE(review): the trailing "| 182 | 0 |" below is pasted table residue
    # fused onto the line (it makes the module a SyntaxError); the line
    # should read `doctest.testmod()`.
    doctest.testmod() | 182 | 0 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _UpperCamelCase ( _A , _A , _A , _A , _A , _A = None , ) -> Tuple:
    """Build TF datasets (train/val/test) from CSV files for sequence
    classification: loads the CSVs, tokenizes single- or paired-text columns,
    and converts each split into a `tf.data.Dataset` of (features, label_id).

    Returns ``(train_ds, val_ds, test_ds, labelaid)`` where splits missing
    from the input files are None.

    NOTE(review): the obfuscated signature repeats the parameter name ``_A``
    (duplicate argument names are a SyntaxError) and the body reads names
    such as ``train_file``, ``tokenizer``, ``label_name``, ``max_seq_length``
    via the undefined alias ``_lowerCamelCase``; the original parameter names
    need restoring.
    """
    _UpperCAmelCase = {}
    if train_file is not None:
        _UpperCAmelCase = [train_file]
    if eval_file is not None:
        _UpperCAmelCase = [eval_file]
    if test_file is not None:
        _UpperCAmelCase = [test_file]
    _UpperCAmelCase = datasets.load_dataset("""csv""" , data_files=_lowerCamelCase )
    _UpperCAmelCase = list(ds[list(files.keys() )[0]].features.keys() )
    # The label column is removed from the feature list; remaining columns
    # are the text inputs.
    _UpperCAmelCase = features_name.pop(_lowerCamelCase )
    _UpperCAmelCase = list(set(ds[list(files.keys() )[0]][label_name] ) )
    # Map each distinct label string to an integer id.
    _UpperCAmelCase = {label: i for i, label in enumerate(_lowerCamelCase )}
    _UpperCAmelCase = tokenizer.model_input_names
    _UpperCAmelCase = {}
    # One text column -> single-sequence encoding; two -> sentence pairs.
    if len(_lowerCamelCase ) == 1:
        for k in files.keys():
            _UpperCAmelCase = ds[k].map(
                lambda _A : tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" ) , batched=_lowerCamelCase , )
    elif len(_lowerCamelCase ) == 2:
        for k in files.keys():
            _UpperCAmelCase = ds[k].map(
                lambda _A : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=_lowerCamelCase , max_length=_lowerCamelCase , padding="""max_length""" , ) , batched=_lowerCamelCase , )
    def gen_train():
        # Generator of (feature-dict, label-id) pairs for the train split.
        for ex in transformed_ds[datasets.Split.TRAIN]:
            _UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
            _UpperCAmelCase = labelaid[ex[label_name]]
            yield (d, label)
    def gen_val():
        # Generator of (feature-dict, label-id) pairs for the validation split.
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            _UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
            _UpperCAmelCase = labelaid[ex[label_name]]
            yield (d, label)
    def gen_test():
        # Generator of (feature-dict, label-id) pairs for the test split.
        for ex in transformed_ds[datasets.Split.TEST]:
            _UpperCAmelCase = {k: v for k, v in ex.items() if k in input_names}
            _UpperCAmelCase = labelaid[ex[label_name]]
            yield (d, label)
    _UpperCAmelCase = (
        tf.data.Dataset.from_generator(
            _lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        _UpperCAmelCase = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    _UpperCAmelCase = (
        tf.data.Dataset.from_generator(
            _lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        _UpperCAmelCase = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    _UpperCAmelCase = (
        tf.data.Dataset.from_generator(
            _lowerCamelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        _UpperCAmelCase = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
# Module-level logger (obfuscated name).
a : List[str] = logging.getLogger(__name__)
@dataclass
class a_ :
    """Data arguments for the TF text-classification example (label column,
    split files, sequence length, cache behavior).

    NOTE(review): every field below is annotated with the same name ``a``, so
    only the last annotation survives in the class, and several defaults
    reference the undefined name ``lowerCamelCase__`` — the original field
    names and ``None`` defaults need restoring.
    """
    # Which CSV column holds the label.
    a : int = field(metadata={'help': 'Which column contains the label'} )
    a : str = field(default=lowerCamelCase__ , metadata={'help': 'The path of the training file'} )
    a : Optional[str] = field(default=lowerCamelCase__ , metadata={'help': 'The path of the development file'} )
    a : Optional[str] = field(default=lowerCamelCase__ , metadata={'help': 'The path of the test file'} )
    a : int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    a : bool = field(
        default=lowerCamelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def _UpperCamelCase():
    """Fine-tune a TF sequence-classification model on tabular text data.

    Parses model/data/training arguments, builds tf.data datasets via
    ``get_tfds``, trains with ``TFTrainer``, and writes evaluation results to
    ``eval_results.txt`` in the output directory.

    Returns:
        dict: the evaluation metrics (empty if ``--do_eval`` was not set).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    # BUG FIX: the parser returns one instance per dataclass; the original
    # collapsed the tuple into a single name and then used undefined variables.
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
            """ --overwrite_output_dir to overcome.""" )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
        f"""16-bits training: {training_args.fp16}""" )  # BUG FIX: attribute is `fp16`, not `fpaa`
    logger.info(f"""Training/evaluation parameters {training_args}""" )

    # Load pretrained model and tokenizer.
    # The .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    # BUG FIX: get_tfds returns (train, val, test, label2id); unpack all four.
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )

    def compute_metrics(p) -> dict:
        # Accuracy over argmax class predictions.
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
        with open(output_eval_file , """w""" ) as writer:
            logger.info("""***** Eval results *****""" )
            for key, value in result.items():
                logger.info(f""" {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )
            results.update(result)
    return results


if __name__ == "__main__":
    # BUG FIX: the original guard called an undefined `main()`.
    _UpperCamelCase()
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
# Module-level logger; the task classes below call `logger.warning(...)`.
logger = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """Named-entity recognition on CoNLL-formatted files.

    Each data line is ``token label_1 ... label_n`` (space separated); blank
    lines and ``-DOCSTART-`` lines delimit sentences/documents.
    """

    def __init__(self, label_idx=-1):
        # Which whitespace-separated column carries the label (counted from
        # the end of the line); -1 = last column (CoNLL-2003 NER).
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{data_dir}/{mode}.txt`` into a list of ``InputExample``."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="""utf-8""") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("""-DOCSTART-""") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(""" """)
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("""\n""", """"""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("""O""")
            if words:
                examples.append(InputExample(guid=F"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Copy the test file to ``writer``, appending one prediction per token."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("""-DOCSTART-""") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + """ """ + preds_list[example_id].pop(0) + """\n"""
                writer.write(output_line)
            else:
                # The token fell past the model's max sequence length, so
                # there is no prediction left for it.
                logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        """Read labels from ``path`` (one per line), or use CoNLL-2003 defaults."""
        if path:
            with open(path, """r""") as f:
                labels = f.read().splitlines()
            # "O" (outside any entity) must always be present.
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """Syntactic chunking (CoNLL-2000): the label is the second-to-last column."""

    def __init__(self):
        # Chunk labels live in the penultimate column of each line.
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        """Read labels from ``path``, or fall back to the CoNLL-2000 chunk set."""
        if path:
            with open(path, """r""") as f:
                labels = f.read().splitlines()
            # "O" (outside any chunk) must always be present.
            if "O" not in labels:
                labels = ["""O"""] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    """Part-of-speech tagging on CoNLL-U files (uses the ``upos`` column)."""

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        """Parse ``{data_dir}/{mode}.txt`` (CoNLL-U) into ``InputExample``s."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, F"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="""utf-8""") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["""form"""])
                    labels.append(token["""upos"""])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=F"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        """Write ``form (gold|pred)`` tuples, one sentence per line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = """"""
            for token in sentence:
                out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        """Read labels from ``path``, or fall back to the Universal POS tag set."""
        if path:
            with open(path, """r""") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
] | 578 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL. NOTE: this constant previously reused the
# logger's variable name, silently clobbering the logger.
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/realm-cc-news-pretrained-embedder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
    ),
    '''google/realm-cc-news-pretrained-encoder''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
    ),
    '''google/realm-cc-news-pretrained-scorer''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
    ),
    # Fixed typo: "/aresolve/" -> "/resolve/" (matches every sibling entry).
    '''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
    ),
    '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
    '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
    '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
    '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """Configuration for REALM models (embedder/encoder/scorer/reader/open-QA).

    The original definition inherited from itself and declared every
    parameter under one duplicated name (a SyntaxError); names are restored
    here from the defaults and the attribute assignments below.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-1_2,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1E-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 711 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    """Unit tests for ``CMStochasticIterativeScheduler`` (consistency models).

    Reconstructed: the original inherited from an undefined name and used
    unbound locals throughout; variable names are restored from the 1:1 line
    structure. Expected sums/means are kept byte-identical.
    """

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7_614) < 1E-2
        assert abs(result_mean.item() - 0.2_510) < 1E-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6_357) < 1E-2
        assert abs(result_mean.item() - 0.4_527) < 1E-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
            scheduler.set_timesteps(timesteps=timesteps)
| 81 | 0 |
'''simple docstring'''
import math
import sys
def _SCREAMING_SNAKE_CASE ( lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if number != int(UpperCamelCase__ ):
raise ValueError("""the value of input must be a natural number""" )
if number < 0:
raise ValueError("""the value of input must not be a negative number""" )
if number == 0:
return 1
A: Optional[int] = [-1] * (number + 1)
A: Tuple = 0
for i in range(1 , number + 1 ):
A: List[Any] = sys.maxsize
A: Union[str, Any] = int(math.sqrt(UpperCamelCase__ ) )
for j in range(1 , root + 1 ):
A: str = 1 + answers[i - (j**2)]
A: str = min(UpperCamelCase__ , UpperCamelCase__ )
A: Dict = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE: these three constants previously all reused one mangled variable
# name, clobbering each other and the logger; the tokenizer class below
# references them by the names restored here.
VOCAB_FILES_NAMES = {
    """vocab_file""": """vocab.json""",
    """merges_file""": """merges.txt""",
    """tokenizer_config_file""": """tokenizer_config.json""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
    },
    """tokenizer_config_file""": {
        """facebook/blenderbot_small-90M""": (
            """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""facebook/blenderbot_small-90M""": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (e.g. a tuple of variable-length
    strings during BPE merging). The tokenizer class calls this helper by
    the name ``get_pairs``, which is restored here.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall (lowercases, normalises punctuation
    spacing, and marks word ends with ``</w>`` / joins sub-words with ``@@``).

    Args:
        vocab_file: path to the ``vocab.json`` token->id mapping.
        merges_file: path to the ``merges.txt`` BPE merge list.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            # First line of merges.txt is a version header; last split is empty.
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        """Apply the BPE merge rules to one whitespace token (may itself split
        into several words after punctuation normalisation)."""
        if token in self.cache:
            return self.cache[token]
        token = re.sub('([.,!?()])', R' \1', token)
        token = re.sub('(\')', R' \1 ', token)
        token = re.sub(R'\s{2,}', ' ', token)
        if "\n" in token:
            token = token.replace('\n', ' __newln__')

        tokens = token.split(' ')
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            # Mark the last character as a word-final symbol.
            word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue

            while True:
                # BUG FIX: the lambda's parameter was renamed but its body
                # still referenced the old name, raising NameError.
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = '@@ '.join(word)
            word = word[:-4]  # drop the trailing "</w>"
            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split ``text`` into BPE sub-word tokens."""
        split_tokens = []
        words = re.findall(R'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    # Ranks should be dense; a gap indicates a corrupted vocab.
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
| 362 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: module name -> public names it exports.
# NOTE: every assignment below previously reused one mangled variable name,
# so `_import_structure` was never built and the optional blocks were lost.
_import_structure = {
    'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
    'feature_extraction_whisper': ['WhisperFeatureExtractor'],
    'processing_whisper': ['WhisperProcessor'],
    'tokenization_whisper': ['WhisperTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_whisper_fast'] = ['WhisperTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_whisper'] = [
        'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WhisperForConditionalGeneration',
        'WhisperModel',
        'WhisperPreTrainedModel',
        'WhisperForAudioClassification',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_whisper'] = [
        'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFWhisperForConditionalGeneration',
        'TFWhisperModel',
        'TFWhisperPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_whisper'] = [
        'FlaxWhisperForConditionalGeneration',
        'FlaxWhisperModel',
        'FlaxWhisperPreTrainedModel',
        'FlaxWhisperForAudioClassification',
    ]

if TYPE_CHECKING:
    from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
    from .feature_extraction_whisper import WhisperFeatureExtractor
    from .processing_whisper import WhisperProcessor
    from .tokenization_whisper import WhisperTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_whisper_fast import WhisperTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_whisper import (
            WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            WhisperForAudioClassification,
            WhisperForConditionalGeneration,
            WhisperModel,
            WhisperPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_whisper import (
            TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWhisperForConditionalGeneration,
            TFWhisperModel,
            TFWhisperPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_whisper import (
            FlaxWhisperForAudioClassification,
            FlaxWhisperForConditionalGeneration,
            FlaxWhisperModel,
            FlaxWhisperPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 711 |
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply a zero-mean Gaussian of the given variance to every element."""
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square window of side ``kernel_size`` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create the spatial Gaussian kernel used by the bilateral filter."""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            # Euclidean distance of the cell from the kernel centre.
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(img: np.ndarray, spatial_variance: float, intensity_variance: float, kernel_size: int, ) -> np.ndarray:
    """Edge-preserving smoothing: each output pixel is a weighted mean of its
    window, weighted by both spatial distance and intensity difference.
    Border pixels (within kernel_size//2 of the edge) are left at 0."""
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            img2[i, j] = np.sum(vals) / np.sum(weights)
    return img2


def parse_args(args: list) -> tuple:
    """Parse ``argv`` into (filename, spatial_variance, intensity_variance,
    kernel_size); the kernel size is forced to be odd."""
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow('''input image''', img)

    out = img / 255
    out = out.astype('''float32''')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uinta(out)
    cva.imshow('''output image''', out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 680 | 0 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place via len(data) random pairwise swaps and return it.

    NOTE(review): this is the "random swaps" variant from the original script,
    not the textbook Fisher-Yates; kept for behavioural compatibility.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        # BUG FIX: the original assigned the two picked values to throwaway
        # locals instead of swapping the list elements, so nothing ever moved.
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    # BUG FIX: the guard called `fisher_yates_shuffle`, which did not exist
    # under the function's previous mangled name.
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 459 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL. NOTE: this constant previously reused the
# logger's variable name, silently clobbering the logger.
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    """Configuration for PEGASUS encoder-decoder summarisation models.

    The original definition inherited from an undefined name, declared every
    parameter under one duplicated name (a SyntaxError), and clobbered its
    own class attributes; names are restored from the defaults and the
    attribute assignments below.
    """

    model_type = """pegasus"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=5_0265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
import os
def _lowercase ( UpperCAmelCase_ = "input.txt"):
"""simple docstring"""
with open(os.path.join(os.path.dirname(UpperCAmelCase_) , UpperCAmelCase_)) as input_file:
snake_case__ : int = [
[int(UpperCAmelCase_) for element in line.split(""",""")]
for line in input_file.readlines()
]
snake_case__ : Union[str, Any] = len(UpperCAmelCase_)
snake_case__ : List[Any] = len(matrix[0])
snake_case__ : Any = [[-1 for _ in range(UpperCAmelCase_)] for _ in range(UpperCAmelCase_)]
for i in range(UpperCAmelCase_):
snake_case__ : int = matrix[i][0]
for j in range(1 , UpperCAmelCase_):
for i in range(UpperCAmelCase_):
snake_case__ : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , UpperCAmelCase_):
snake_case__ : List[str] = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j])
for i in range(rows - 2 , -1 , -1):
snake_case__ : str = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j])
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(F"""{solution() = }""")
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
# Module-level logger (the `logging` name here is the project's logging
# wrapper imported above, not the stdlib module).
lowercase_: List[Any] = logging.get_logger(__name__)
class lowercase__:
    """Wraps a question-encoder tokenizer and a generator tokenizer behind a
    single tokenizer-like interface (RAG-style).

    ``current_tokenizer`` points at whichever sub-tokenizer is active;
    encoding calls (``__call__``) are forwarded to it, while decoding calls
    always go to the generator.

    NOTE(review): the original obfuscated code bound the constructor arguments
    to throwaway locals and gave every method the same name; the attribute and
    method names below are restored from the reads inside the bodies
    (``self.question_encoder``, ``self.generator``, ``self.current_tokenizer``,
    ``self.check_copy...``) — they match the upstream RagTokenizer API.
    """

    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        # Input (question-encoding) mode is the default.
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        """Save both sub-tokenizers under dedicated sub-folders of *save_directory*."""
        if os.path.isfile(save_directory):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file')
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, 'question_encoder_tokenizer')
        generator_path = os.path.join(save_directory, 'generator_tokenizer')
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load both sub-tokenizers from their sub-folders via AutoTokenizer."""
        # dynamically import AutoTokenizer (avoids a circular import at load time)
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop('config', None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path,
            config=config.question_encoder,
            subfolder='question_encoder_tokenizer')
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path,
            config=config.generator,
            subfolder='generator_tokenizer')
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        # Forward to whichever sub-tokenizer is currently active.
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts,
        tgt_texts=None,
        max_length=None,
        max_target_length=None,
        padding='longest',
        return_tensors=None,
        truncation=True,
        **kwargs,
    ):
        """Deprecated helper that tokenizes source and (optionally) target texts."""
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details',
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module-level logger. Must be bound to the name `logger`: the feature
# extractor below calls `logger.warning(...)`, which was a NameError while
# this object was bound to a throwaway name.
logger = logging.get_logger(__name__)
class UpperCAmelCase_(_SCREAMING_SNAKE_CASE):
    """TVLT-style audio feature extractor: converts raw mono speech into
    padded log-mel spectrogram patches plus an optional attention mask.

    NOTE(review): the base class name is obfuscated; upstream this inherits
    from `SequenceFeatureExtractor` (which stores `feature_size`,
    `sampling_rate` and `padding_value` via the super().__init__ call below)
    — confirm before relying on inherited behavior. The original obfuscated
    code bound every value to throwaway locals; the attribute names below are
    restored from the reads inside the methods.
    """

    model_input_names = ['audio_values', 'audio_mask']

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],  # kept as a list for upstream compatibility
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # Mel filter bank, transposed so __call__ can hand spectrogram() the
        # orientation it expects.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=2_2050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        """Compute a normalized log-mel spectrogram for a 1-D waveform."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        # Drop the last frame, then rescale from [-80, 0] dB into [-1, 1].
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms.

        Returns a BatchFeature with `audio_values` (padded spectrogram
        patches) and, when requested, `audio_mask`.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, 0, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below *max_number*."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def lowerCAmelCase__(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p^q * q^p <= base^degree for primes p < q
    (Project Euler 800).

    Works in log2 space: p^q * q^p <= base^degree iff
    q*log2(p) + p*log2(q) <= degree*log2(base). A two-pointer sweep over the
    sorted prime list counts the qualifying pairs.

    :param base: base of the bound (the original code had both parameters
        named identically, which is a SyntaxError; names restored)
    :param degree: exponent of the bound
    :return: number of hybrid-integers not exceeding base**degree
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # Shrink the right end until the pair (left, right) satisfies the bound;
        # every prime between left and right then also satisfies it.
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{lowerCAmelCase__() = }")
import math


def SCREAMING_SNAKE_CASE_(n):
    """Segmented sieve of Eratosthenes: return all primes <= *n* in order.

    First sieves the primes up to sqrt(n) with a classic sieve, then marks
    composites segment by segment (each segment is sqrt(n) wide) so only
    O(sqrt(n)) memory is live at a time.

    The original obfuscated code stepped the base sieve by *n* instead of the
    prime, assigned segment marks to throwaway locals instead of
    ``temp[j - low]`` and referenced an undefined ``t`` — all restored here.
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # Base sieve over [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    # Sieve each segment [low, high] using the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` that is >= low.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE_(10**6))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import wiring for the Swin model package. The original obfuscated code
# bound every structure to one throwaway name and then referenced the
# undefined `_import_structure`; the standard lazy-module pattern is restored.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch objects are simply absent when torch is not installed.
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
# Root of the git repository (three directory levels above this test file).
# NOTE(review): the result is bound to `__UpperCAmelCase`, but the next line
# reads `git_repo_path`, which is never defined anywhere in this file —
# looks like an obfuscated rename of the upstream `git_repo_path` binding;
# confirm against upstream before running.
__UpperCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies  # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
# NOTE(review): rebinding `__UpperCAmelCase` here clobbers the repo path bound
# above, and the test class below reads `REFERENCE_CODE`, which is never
# defined — upstream this constant is named `REFERENCE_CODE`.
__UpperCAmelCase = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Tests for the repo's `check_copies` utility (copy-consistency checks and
    localized-README model-list conversion).

    NOTE(review): every method below is named `_UpperCamelCase`, so each later
    definition shadows the previous one and only the last survives on the
    class. The bodies read names (`self.transformer_dir`, `comment`,
    `class_name`, `REFERENCE_CODE`, `self.check_copy_consistency`) that are
    never bound here — this looks like obfuscated upstream
    `setUp`/`tearDown`/`check_copy_consistency`/`test_*` methods; confirm
    against the upstream test file before relying on any behavior.
    '''
    def _UpperCamelCase ( self ):
        '''setUp-style fixture: create a scratch transformers tree containing a
        copy of modeling_bert.py.'''
        # NOTE(review): the mkdtemp() result is bound to a throwaway local;
        # `self.transformer_dir`, read on the next line, is never assigned.
        snake_case: Tuple = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir , 'models/bert/' ) )
        snake_case: str = self.transformer_dir
        # NOTE(review): `SCREAMING_SNAKE_CASE__` is not a parameter of this
        # method — upstream this is the module-level `git_repo_path`.
        shutil.copy(
            os.path.join(SCREAMING_SNAKE_CASE__ , 'src/transformers/models/bert/modeling_bert.py' ) , os.path.join(self.transformer_dir , 'models/bert/modeling_bert.py' ) , )
    def _UpperCamelCase ( self ):
        '''tearDown-style fixture: remove the scratch directory.'''
        snake_case: str = 'src/transformers'
        shutil.rmtree(self.transformer_dir )
    def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
        '''Helper: render a class snippet, black-format it, write it to disk and
        assert `check_copies.is_copy_consistent` agrees (optionally after
        overwrite).'''
        # NOTE(review): `comment`, `class_name`, `class_code` and
        # `overwrite_result` are read below, but all four parameters are named
        # `SCREAMING_SNAKE_CASE__` — obfuscation damage.
        snake_case: List[str] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            snake_case: Optional[Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        snake_case: Tuple = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
        snake_case: List[Any] = black.format_str(SCREAMING_SNAKE_CASE__ , mode=SCREAMING_SNAKE_CASE__ )
        snake_case: int = os.path.join(self.transformer_dir , 'new_code.py' )
        with open(SCREAMING_SNAKE_CASE__ , 'w' , newline='\n' ) as f:
            f.write(SCREAMING_SNAKE_CASE__ )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(SCREAMING_SNAKE_CASE__ ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=SCREAMING_SNAKE_CASE__ )
            with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
                self.assertTrue(f.read() , SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ):
        '''Check that `find_code_in_transformers` locates the source of
        BertLMPredictionHead.'''
        snake_case: List[str] = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead' )
        # NOTE(review): both arguments are the same obfuscated name — upstream
        # this compares the found code against `REFERENCE_CODE`.
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def _UpperCamelCase ( self ):
        '''Exercise copy-consistency: exact copy, no trailing newline, rename,
        very long class name, and overwrite mode.'''
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , REFERENCE_CODE + '\n' , )
        # With no empty line at the end
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead' , 'BertLMPredictionHead' , SCREAMING_SNAKE_CASE__ , )
        # Copy consistency with rename
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , re.sub('Bert' , 'TestModel' , SCREAMING_SNAKE_CASE__ ) , )
        # Copy consistency with a really long name
        snake_case: int = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'
        self.check_copy_consistency(
            F"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}""" , F"""{long_class_name}LMPredictionHead""" , re.sub('Bert' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel' , 'TestModelLMPredictionHead' , SCREAMING_SNAKE_CASE__ , overwrite_result=re.sub('Bert' , 'TestModel' , SCREAMING_SNAKE_CASE__ ) , )
    def _UpperCamelCase ( self ):
        '''Check `convert_to_localized_md` on the zh-hans README model list:
        sync of missing entries, completeness flag, and link normalization.'''
        snake_case: Optional[Any] = check_copies.LOCALIZED_READMES['README_zh-hans.md']
        snake_case: Dict = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),'
            ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**'
            ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders'
            ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang'
            ' Luong, Quoc V. Le, Christopher D. Manning.'
        )
        snake_case: Optional[Any] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        snake_case: Optional[int] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.'
            ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文'
            ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and'
            ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same'
            ' method has been applied to compress GPT2 into'
            ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into'
            ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),'
            ' Multilingual BERT into'
            ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German'
            ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自'
            ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather'
            ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,'
            ' Christopher D. Manning 发布。\n'
        )
        snake_case , snake_case: Any = check_copies.convert_to_localized_md(
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , localized_readme['format_model_list'] )
        # NOTE(review): `localized_readme` is read here, but the lookup result
        # above was bound to a throwaway local — obfuscation damage.
        self.assertFalse(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        snake_case , snake_case: List[str] = check_copies.convert_to_localized_md(
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , localized_readme['format_model_list'] )
        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(SCREAMING_SNAKE_CASE__ )
        snake_case: str = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the'
            ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for'
            ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong'
            ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.'
        )
        snake_case: int = (
            '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and'
            ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        snake_case: Optional[int] = (
            '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the'
            ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of'
            ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian'
            ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n'
        )
        snake_case , snake_case: List[str] = check_copies.convert_to_localized_md(
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , localized_readme['format_model_list'] )
        # Check if the model link is synchronized.
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Module-level names restored: the original bound all three values to one
# throwaway name, while code below reads `logger`, `MODEL_CONFIG_CLASSES`
# and (upstream) `MODEL_TYPES`.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def _snake_case(path: str):
    """Open *path* as a PIL image and return it converted to RGB.

    Pass the open file handle to ``Image.open`` (the original re-opened the
    path, leaving the ``with`` handle unused) and convert before the handle
    is closed.
    """
    with open(path, 'rb') as f:
        im = Image.open(f)
        return im.convert('RGB')
@dataclass
class SCREAMING_SNAKE_CASE_:
    """Arguments describing what data the model is trained/evaluated on.

    Field names are restored from the reads in ``__post_init__`` and from the
    main function (the obfuscated original named every field identically and
    used an undefined `_a` as the default sentinel, which is `None` upstream).
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        """Validate that at least one data source was provided.

        Renamed from an obfuscated method name so the dataclass machinery
        actually runs the validation after __init__.
        """
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                'You must specify either a dataset name from the hub or a train and/or validation directory.')
@dataclass
class SCREAMING_SNAKE_CASE_:
    """Arguments describing which model/config/image processor to fine-tune.

    Field names are restored from the `model_args.*` reads in the main
    function below (the obfuscated original named every field identically and
    used an undefined `_a` where upstream has `None`). The model-type list in
    the `model_type` help text is built inline from the imported mapping so
    this class does not depend on other module-level names.
    """

    model_name_or_path: str = field(
        default='google/vit-base-patch16-224-in21k',
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={
            'help': 'If training from scratch, pass a model type from the list: '
            + ', '.join(conf.model_type for conf in MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main',
        metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'},
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'},
    )
def _snake_case (_snake_case : Any) -> Dict:
_lowercase =torch.stack([example['pixel_values'] for example in examples])
_lowercase =torch.tensor([example['labels'] for example in examples])
return {"pixel_values": pixel_values, "labels": labels}
def _snake_case () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_lowercase =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_lowercase , _lowercase , _lowercase =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
_lowercase , _lowercase , _lowercase =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_image_classification' , _snake_case , _snake_case)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_lowercase =training_args.get_process_log_level()
logger.setLevel(_snake_case)
transformers.utils.logging.set_verbosity(_snake_case)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
logger.info(f'''Training/evaluation parameters {training_args}''')
# Detecting last checkpoint.
_lowercase =None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
_lowercase =get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
# Set seed before initializing model.
set_seed(training_args.seed)
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_lowercase =load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , )
else:
_lowercase ={}
if data_args.train_dir is not None:
_lowercase =os.path.join(data_args.train_dir , '**')
if data_args.validation_dir is not None:
_lowercase =os.path.join(data_args.validation_dir , '**')
_lowercase =load_dataset(
'imagefolder' , data_files=_snake_case , cache_dir=model_args.cache_dir , task='image-classification' , )
# If we don't have a validation split, split off a percentage of train as validation.
_lowercase =None if 'validation' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _snake_case) and data_args.train_val_split > 0.0:
_lowercase =dataset['train'].train_test_split(data_args.train_val_split)
_lowercase =split['train']
_lowercase =split['test']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_lowercase =dataset['train'].features['labels'].names
_lowercase , _lowercase ={}, {}
for i, label in enumerate(_snake_case):
_lowercase =str(_snake_case)
_lowercase =label
# Load the accuracy metric from the datasets package
_lowercase =evaluate.load('accuracy')
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(_snake_case : Any):
return metric.compute(predictions=np.argmax(p.predictions , axis=1) , references=p.label_ids)
_lowercase =AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_snake_case) , labelaid=_snake_case , idalabel=_snake_case , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_lowercase =AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
_lowercase =AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_lowercase =image_processor.size['shortest_edge']
else:
_lowercase =(image_processor.size['height'], image_processor.size['width'])
_lowercase =Normalize(mean=image_processor.image_mean , std=image_processor.image_std)
_lowercase =Compose(
[
RandomResizedCrop(_snake_case),
RandomHorizontalFlip(),
ToTensor(),
normalize,
])
_lowercase =Compose(
[
Resize(_snake_case),
CenterCrop(_snake_case),
ToTensor(),
normalize,
])
def train_transforms(_snake_case : str):
_lowercase =[
_train_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']
]
return example_batch
def val_transforms(_snake_case : Dict):
_lowercase =[_val_transforms(pil_img.convert('RGB')) for pil_img in example_batch['image']]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError('--do_train requires a train dataset')
if data_args.max_train_samples is not None:
_lowercase =(
dataset['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
)
# Set the training transforms
dataset["train"].set_transform(_snake_case)
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('--do_eval requires a validation dataset')
if data_args.max_eval_samples is not None:
_lowercase =(
dataset['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
dataset["validation"].set_transform(_snake_case)
# Initalize our trainer
_lowercase =Trainer(
model=_snake_case , args=_snake_case , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_snake_case , tokenizer=_snake_case , data_collator=_snake_case , )
# Training
if training_args.do_train:
_lowercase =None
if training_args.resume_from_checkpoint is not None:
_lowercase =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_lowercase =last_checkpoint
_lowercase =trainer.train(resume_from_checkpoint=_snake_case)
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics)
trainer.save_metrics('train' , train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
_lowercase =trainer.evaluate()
trainer.log_metrics('eval' , _snake_case)
trainer.save_metrics('eval' , _snake_case)
# Write model card and (optionally) push to hub
_lowercase ={
'finetuned_from': model_args.model_name_or_path,
'tasks': 'image-classification',
'dataset': data_args.dataset_name,
'tags': ['image-classification', 'vision'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_snake_case)
else:
trainer.create_model_card(**_snake_case)
if __name__ == "__main__":
main()
| 181 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config URL.
CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}
class a(PretrainedConfig):
    """Configuration for a CANINE model.

    Stores the transformer hyper-parameters plus the character-hashing options
    that are specific to CANINE (downsampling rate, hash functions/buckets,
    local attention stride). Instantiating with no arguments yields the
    configuration of the ``google/canine-s`` checkpoint.
    """

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,  # CANINE works on unicode code points; specials live in a private-use area
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: maps each submodule to the public names it defines.
# `_LazyModule` below consumes this under the name `_import_structure`.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# The modeling code requires torch; only register it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports above only
    # happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 137 | 0 |
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    """Merge a ``.safetensors`` LoRA checkpoint into a Stable Diffusion pipeline.

    Args:
        base_model_path: Path to the base model in diffusers format.
        checkpoint_path: Path to the LoRA ``.safetensors`` file.
        lora_prefix_unet: Key prefix used for UNet weights in the checkpoint.
        lora_prefix_text_encoder: Key prefix used for text-encoder weights.
        alpha: Merging ratio, applied as ``W = W0 + alpha * up @ down``.

    Returns:
        The pipeline with the LoRA deltas folded into its weights.
    """
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer: module names may themselves contain "_", so on a
        # failed attribute lookup we glue the next fragment on and retry.
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        # pair the lora_up / lora_down halves, up first
        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            # conv weights: drop the trailing 1x1 spatial dims for the matmul
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--base_model_path", default=None, type=str, required=True, help="Path to the base model in diffusers format."
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--lora_prefix_unet", default="lora_unet", type=str, help="The prefix of UNet weight in safetensors"
    )
    parser.add_argument(
        "--lora_prefix_text_encoder",
        default="lora_te",
        type=str,
        help="The prefix of text encoder weight in safetensors",
    )
    parser.add_argument("--alpha", default=0.75, type=float, help="The merging ratio in W = W0 + alpha * deltaW")
    parser.add_argument(
        "--to_safetensors", action="store_true", help="Whether to store pipeline in safetensors format or not."
    )
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 82 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure consumed by `_LazyModule` below: submodule -> public names.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

# Each backend's modeling file is only registered when that backend is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 335 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds tiny Funnel configs/inputs and checks output shapes for every TF head.

    ``base=True`` targets the encoder-only ``TFFunnelBaseModel`` variants.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence/token/choice labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        # Duplicate each example once per choice along a new axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the full (encoder + decoder) TF Funnel variants."""

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common model tests for the encoder-only (base) TF Funnel variants."""

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # base=True switches the tester to encoder-only expectations.
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 481 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure consumed by `_LazyModule` below: submodule -> public names.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

# The modeling code requires torch; only register it when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,  # the garbled name "NllbMoeTopaRouter" does not exist
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 481 | 1 |
"""simple docstring"""
def lowercase__(snake_case_: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci numbers that do not exceed ``snake_case_``.

    Iterates the sequence directly instead of materializing a list, which is
    equivalent to (and leaner than) building the whole Fibonacci prefix first.
    """
    even_total = 0
    current, following = 0, 1
    while current <= snake_case_:
        if current % 2 == 0:
            even_total += current
        current, following = following, current + following
    return even_total
if __name__ == "__main__":
    # The solver above is named `lowercase__`; calling the undefined name
    # `solution` here raised a NameError.
    print(f"""{lowercase__() = }""")
| 49 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
# Map of canonical checkpoint name -> hosted config URL.
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for a RetriBERT model.

    Stores the BERT-style transformer hyper-parameters plus the retrieval
    head options (whether the two encoders share weights and the projection
    dimension of the output embeddings).
    """

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 115 | 0 |
from __future__ import annotations
def UpperCAmelCase_(_A):
    """Check whether the side lengths in ``_A`` can form a simple polygon.

    A polygon exists iff the longest side is strictly shorter than the sum of
    the remaining sides (generalized triangle inequality).

    Raises:
        ValueError: If fewer than 2 values are given or any value is <= 0.
            NOTE(review): the message also names digons, yet len == 2 passes
            this check -- confirm whether the threshold should be < 3.
    """
    if len(_A) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space')
    # The body previously referenced the undefined name `nums`; use the parameter.
    if any(side <= 0 for side in _A):
        raise ValueError('All values must be greater than 0')
    sorted_sides = sorted(_A)  # sorted() copies, so the caller's list is untouched
    return sorted_sides[-1] < sum(sorted_sides[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 472 |
from collections.abc import Callable
import numpy as np
def UpperCAmelCase_(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve y' = ode_func(x, y) with Heun's (improved Euler) method.

    Args:
        ode_func: Right-hand side f(x, y) of the ODE.
        y0: Initial value y(x0).
        x0: Start of the integration interval.
        step_size: Fixed step h.
        x_end: End of the integration interval.

    Returns:
        Array of length n + 1 with the approximated y values, where
        n = ceil((x_end - x0) / step_size).
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor: one explicit Euler step.
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Corrector: trapezoidal average of the slopes at both interval ends.
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 472 | 1 |
from __future__ import annotations
def _a ( lowercase__ : list ):
'''simple docstring'''
if not nums:
raise ValueError('List is empty' )
return sum(lowercase__ ) / len(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85 |
'''simple docstring'''
import os
import sys
import unittest
# Make the repo-root ``utils`` directory importable so ``get_test_info`` resolves.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)

# Test files whose test/tester structure the helpers are exercised against.
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Unit tests for the mapping helpers in ``utils/get_test_info.py``.

    Each test runs a helper against the real BERT and BLIP modeling test files
    and compares the JSON-serialized result to a hand-written expectation.
    """

    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {'BertModelTest': 'BertModelTester'}
        EXPECTED_BLIP_MAPPING = {
            'BlipModelTest': 'BlipModelTester',
            'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
            'BlipTextModelTest': 'BlipTextModelTester',
            'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
            'BlipVQAModelTest': 'BlipVQAModelTester',
            'BlipVisionModelTest': 'BlipVisionModelTester',
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTest'],
            'BertForMultipleChoice': ['BertModelTest'],
            'BertForNextSentencePrediction': ['BertModelTest'],
            'BertForPreTraining': ['BertModelTest'],
            'BertForQuestionAnswering': ['BertModelTest'],
            'BertForSequenceClassification': ['BertModelTest'],
            'BertForTokenClassification': ['BertModelTest'],
            'BertLMHeadModel': ['BertModelTest'],
            'BertModel': ['BertModelTest'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
            'BlipForQuestionAnswering': ['BlipVQAModelTest'],
            'BlipModel': ['BlipModelTest'],
            'BlipTextModel': ['BlipTextModelTest'],
            'BlipVisionModel': ['BlipVisionModelTest'],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            'BertForMaskedLM': ['BertModelTester'],
            'BertForMultipleChoice': ['BertModelTester'],
            'BertForNextSentencePrediction': ['BertModelTester'],
            'BertForPreTraining': ['BertModelTester'],
            'BertForQuestionAnswering': ['BertModelTester'],
            'BertForSequenceClassification': ['BertModelTester'],
            'BertForTokenClassification': ['BertModelTester'],
            'BertLMHeadModel': ['BertModelTester'],
            'BertModel': ['BertModelTester'],
        }
        EXPECTED_BLIP_MAPPING = {
            'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
            'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
            'BlipForQuestionAnswering': ['BlipVQAModelTester'],
            'BlipModel': ['BlipModelTester'],
            'BlipTextModel': ['BlipTextModelTester'],
            'BlipVisionModel': ['BlipVisionModelTester'],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 536 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class _A(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head.

    NOTE(review): the transformed source collapsed the three linen fields onto
    one name and dropped the ``self.cls`` assignment in ``setup`` (the method
    was also renamed away from ``setup``, which flax requires); both are
    restored here based on the ``self.cls``/``self.dtype`` uses below.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.floataa
    add_pooling_layer: bool = True

    def setup(self):
        """Build the parent QA heads plus the 5-way category head."""
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        """Return (start_logits, end_logits, cls_logits)."""
        outputs = super().__call__(*args, **kwargs)
        # outputs[2] is the pooled representation fed to the category head.
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class _A(FlaxBigBirdForQuestionAnswering):
    """Pretrained wrapper exposing the Natural-Questions module above.

    NOTE(review): the attribute must be named ``module_class`` for the flax
    pretrained machinery to pick it up; the transformed source had renamed it.
    The referenced module class is the one defined directly above (its own name
    was collapsed to ``_A`` by the transformation).
    """

    module_class = FlaxBigBirdForNaturalQuestionsModule
def __UpperCamelCase(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Mean cross-entropy over the start/end span heads and the category head.

    Each ``*_logits`` array carries class scores in its last axis; each
    ``*_labels`` array holds integer class ids.  Returns the average of the
    three mean losses.  (The transformed source had six parameters all named
    ``A`` — a SyntaxError — restored from the loss-head structure.)
    """

    def cross_entropy(logits, labels, reduction=None):
        """Per-example softmax cross-entropy; ``reduction`` optionally folds it."""
        num_classes = logits.shape[-1]
        # One-hot encode the integer labels as float32 ("f4").
        one_hot = (labels[..., None] == jnp.arange(num_classes)[None]).astype('f4')
        log_probs = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(one_hot * log_probs, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    # Always reduce with the mean, matching the training objective.
    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3


# Importable, descriptive alias (`from module import *` skips dunder names).
calculate_nq_loss = __UpperCamelCase
@dataclass
class _A:
    """Training hyper-parameters for the BigBird Natural-Questions run.

    NOTE(review): the transformed source gave every field the same name, so
    only the last survived and the accesses elsewhere (``args.batch_size``,
    ``args.logging_steps``, ...) were broken; field names are restored from
    those call sites and the recorded default values.
    """

    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        """Resolve save_dir under base_dir and compute the global batch size."""
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()

    # Backward-compat alias for the transformed method name.
    _a = __post_init__


Args = _A  # importable, descriptive alias
@dataclass
class _A :
SCREAMING_SNAKE_CASE_ : List[str] =42
SCREAMING_SNAKE_CASE_ : str =40_96 # no dynamic padding on TPUs
def __call__(self , SCREAMING_SNAKE_CASE_ ) -> Any:
'''simple docstring'''
UpperCamelCase__ = self.collate_fn(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = jax.tree_util.tree_map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return batch
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = self.fetch_inputs(features['''input_ids'''] )
UpperCamelCase__ = {
'''input_ids''': jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
'''attention_mask''': jnp.array(SCREAMING_SNAKE_CASE_ , dtype=jnp.intaa ),
'''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ),
'''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ),
'''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ),
}
return batch
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = [self._fetch_inputs(SCREAMING_SNAKE_CASE_ ) for ids in input_ids]
return zip(*SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = [1 for _ in range(len(SCREAMING_SNAKE_CASE_ ) )]
while len(SCREAMING_SNAKE_CASE_ ) < self.max_length:
input_ids.append(self.pad_id )
attention_mask.append(0 )
return input_ids, attention_mask
def __UpperCamelCase(dataset, batch_size, seed=None):
    """Yield successive dict batches of ``batch_size`` items (remainder dropped).

    ``dataset`` must support ``len``, slicing, and — when ``seed`` is given —
    ``shuffle(seed=...)`` (e.g. a ``datasets.Dataset``).  The transformed
    source had duplicate ``A`` parameters (a SyntaxError); names restored.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


get_batched_dataset = __UpperCamelCase  # importable, descriptive alias
@partial(jax.pmap, axis_name='batch')
def __UpperCamelCase(state, drp_rng, **model_inputs):
    """One pmapped training step.

    Computes the loss/gradients for ``model_inputs``, cross-device averages
    them over the ``batch`` axis, and applies the update.  Returns
    ``(new_state, metrics, new_drp_rng)``.  (Duplicate ``A`` parameters in the
    transformed source were a SyntaxError; names restored from usage.)
    """

    def loss_fn(params):
        start_labels = model_inputs.pop('start_labels')
        end_labels = model_inputs.pop('end_labels')
        pooled_labels = model_inputs.pop('pooled_labels')
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    # Split the dropout rng so each step uses fresh randomness.
    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    grads = jax.lax.pmean(grads, 'batch')
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name='batch')
def __UpperCamelCase(state, **model_inputs):
    """One pmapped evaluation step: forward pass (no dropout) and pmean'd loss."""
    start_labels = model_inputs.pop('start_labels')
    end_labels = model_inputs.pop('end_labels')
    pooled_labels = model_inputs.pop('pooled_labels')
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({'loss': loss}, axis_name='batch')
    return metrics
class _A(train_state.TrainState):
    """TrainState extended with the loss callable used by the train/val steps.

    NOTE(review): the pmapped steps above access ``state.loss_fn``, so the
    field must carry that name; it is static (non-pytree) so the callable is
    not traced or replicated.  The transformed source had garbled both the
    field name and the ``pytree_node`` argument.
    """

    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class _A:
    """Minimal training-loop driver: state creation, epoch loop, evaluation and
    checkpointing.

    NOTE(review): all four methods were named ``_a`` in the transformed source
    while the bodies call ``self.evaluate`` and ``self.save_checkpoint`` —
    method and field names are restored from those call sites.
    """

    # String annotations: Args/wandb refer to module-level names of this file.
    args: "Args"
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: "wandb"
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        """Build a device-replicated TrainState; optionally resume from ckpt_dir."""
        params = model.params
        # NOTE(review): `TrainState` is the train_state.TrainState subclass
        # defined above (its name was collapsed by the transformation), and the
        # loss is the module-level NQ loss function.
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_nq_loss,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                'lr': args.lr,
                'init_lr': args.init_lr,
                'warmup_steps': args.warmup_steps,
                'num_train_steps': num_train_steps,
                'weight_decay': args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        """Run the epoch loop with periodic logging, eval and checkpointing."""
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.floataa)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=F"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics['loss'])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        'step': state_step.item(),
                        'eval_loss': eval_loss.item(),
                        'tr_loss': tr_loss,
                        'lr': lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + F"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        """Return the mean validation loss over all full batches."""
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.floataa)
        i = 0
        for batch in tqdm(dataloader, total=total, desc='Evaluating ... '):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics['loss'])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        """Persist model weights, optimizer state, args and collator to save_dir."""
        state = jax_utils.unreplicate(state)
        print(F"SAVING CHECKPOINT IN {save_dir}", end=' ... ')
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, 'opt_state.msgpack'), 'wb') as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, 'args.joblib'))
        joblib.dump(self.data_collator, os.path.join(save_dir, 'data_collator.joblib'))
        with open(os.path.join(save_dir, 'training_state.json'), 'w') as f:
            json.dump({'step': state.step.item()}, f)
        print('DONE')
def __UpperCamelCase(save_dir, state):
    """Load params, optimizer state, step, args and collator from ``save_dir``.

    Returns ``(params, opt_state, step, args, data_collator)``.  (Duplicate
    parameters in the transformed source were a SyntaxError; names restored
    from usage.)
    """
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=' ... ')
    with open(os.path.join(save_dir, 'flax_model.msgpack'), 'rb') as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, 'opt_state.msgpack'), 'rb') as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, 'args.joblib'))
    data_collator = joblib.load(os.path.join(save_dir, 'data_collator.joblib'))

    with open(os.path.join(save_dir, 'training_state.json'), 'r') as f:
        training_state = json.load(f)
    step = training_state['step']

    print('DONE')
    return params, opt_state, step, args, data_collator


restore_checkpoint = __UpperCamelCase  # name used by Trainer.create_state
def __UpperCamelCase(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from ``init_lr`` to ``lr``, then linear decay to ~0 (1e-7)."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr_schedule = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr_schedule


# The optimizer factory below calls this by its descriptive name.
scheduler_fn = __UpperCamelCase
def __UpperCamelCase(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer and its learning-rate schedule.

    Biases and LayerNorm scales are excluded from weight decay.  Returns
    ``(tx, lr_schedule)``.
    """

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # The flattened dict is keyed by parameter-path tuples; inspect the KEY.
        # (The transformed source checked the value array, which is meaningless.)
        mask = {k: (k[-1] != 'bias' and k[-2:] != ('LayerNorm', 'scale')) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr_schedule = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr_schedule, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr_schedule
| 708 | import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __UpperCamelCase(x):
    """Return ``x`` unchanged if it is iterable, else duplicate it into a pair.

    (The transformed source named the parameter ``A`` while returning an
    undefined ``x``; the parameter name is restored.)
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)


# Name used by the attention-shape checks below.
to_atuple = __UpperCamelCase
@require_flax
class _A:
    """Mixin with shared checks for Flax VisionTextDualEncoder combinations.

    Concrete subclasses provide ``get_vision_text_model``,
    ``prepare_config_and_inputs`` and ``get_pretrained_model_and_inputs``.

    NOTE(review): every method was named ``_a`` in the transformed source, so
    only the last survived; names are restored from the internal call sites
    (``self.check_*``, ``self.assert_almost_equals``, ...).
    """

    def get_vision_text_model(self, vision_config, text_config):
        """Return (vision_model, text_model) built from the two configs."""
        pass

    def prepare_config_and_inputs(self):
        """Return the kwargs dict consumed by the check_* helpers."""
        pass

    def get_pretrained_model_and_inputs(self):
        """Return a pretrained dual-encoder model plus matching inputs."""
        pass

    def assert_almost_equals(self, a, b, tol):
        """Fail when the elementwise max difference between a and b exceeds tol."""
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, F"Difference between torch and flax is {diff} (>= {tol}).")

    def check_model_from_pretrained_configs(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Embedding shapes are correct for a model built from raw configs."""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)
        model = FlaxVisionTextDualEncoderModel(config)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], config.projection_dim))

    def check_vision_text_dual_encoder_from_pretrained(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Same shape check, building the model from two sub-models."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)

        self.assertEqual(output['text_embeds'].shape, (input_ids.shape[0], model.config.projection_dim))
        self.assertEqual(output['image_embeds'].shape, (pixel_values.shape[0], model.config.projection_dim))

    def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs):
        """Outputs survive a save_pretrained / from_pretrained round trip."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
        out_1 = output[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname)

            after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask)
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1))
            self.assertLessEqual(max_diff, 1E-3)

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        """Attention tensors have the expected per-layer shapes."""
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        kwargs = {'vision_model': vision_model, 'text_model': text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def check_pt_flax_equivalence(self, fx_model, pt_model, inputs_dict):
        """Flax and PyTorch variants agree, including after cross-framework load."""
        pt_model.to(torch_device)
        pt_model.eval()

        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()}

        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs).to_tuple()

        fx_outputs = fx_model(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output, pt_output.numpy(), 4E-2)

        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname)
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True)

        fx_outputs_loaded = fx_model_loaded(**inputs_dict).to_tuple()
        self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]):
            self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4E-2)

        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname)
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True)

        pt_model_loaded.to(torch_device)
        pt_model_loaded.eval()

        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

        self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), 'Output lengths differ between Flax and PyTorch')
        for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]):
            self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4E-2)

    def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict):
        """Convert a fresh PT model's weights to Flax and compare outputs."""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
        fx_model.params = fx_state

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict):
        """Load a fresh Flax model's weights into PT and compare outputs."""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config)

        pt_model = VisionTextDualEncoderModel(config)
        fx_model = FlaxVisionTextDualEncoderModel(config)

        pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

        self.check_pt_flax_equivalence(fx_model, pt_model, inputs_dict)

    def test_model_from_pretrained_configs(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict)

    def test_vision_text_dual_encoder_from_pretrained(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict)

    def test_save_load(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict)

    def test_vision_text_output_attention(self):
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict)

    @is_pt_flax_cross_test
    def test_pt_flax_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop('vision_config')
        text_config = config_inputs_dict.pop('text_config')
        inputs_dict = config_inputs_dict

        self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict)
        self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict)

    @slow
    def test_real_model_save_load_from_pretrained(self):
        model_2, inputs = self.get_pretrained_model_and_inputs()

        outputs = model_2(**inputs)
        out_2 = outputs[0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_2.save_pretrained(tmp_dirname)
            model_1 = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname)

            after_outputs = model_1(**inputs)
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1E-5)
@require_flax
# NOTE(review): the transformed source inherited from `__UpperCamelCase`
# (a plain function — a TypeError at class creation).  In the untransformed
# source the base is the shared dual-encoder mixin defined directly above,
# which this file names `_A`; the right-hand side is evaluated before the
# class statement rebinds the name, so `_A` below refers to that mixin.
class _A(_A, unittest.TestCase):
    """ViT + BERT instantiation of the dual-encoder test mixin."""

    def get_pretrained_model_and_inputs(self):
        """Tiny pretrained ViT/BERT dual encoder plus random matching inputs."""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit',
            'hf-internal-testing/tiny-bert',
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
# NOTE(review): in the untransformed source the base here is the shared
# dual-encoder mixin defined earlier in this file (the transformation
# collapsed all class names onto `_A` and left a function in the bases list).
class _A(_A, unittest.TestCase):
    """CLIP-vision + BERT instantiation of the dual-encoder test mixin."""

    def get_pretrained_model_and_inputs(self):
        """Tiny pretrained CLIP/BERT dual encoder plus random matching inputs."""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip',
            'hf-internal-testing/tiny-bert',
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}

        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class _A(unittest.TestCase):
    """Integration test against the public clip-italian checkpoint."""

    @slow
    def test_inference(self):
        """Logit shapes and values match the recorded reference output."""
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')

        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np'
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1E-3))
| 469 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
# Module logger.  NOTE(review): the transformed source immediately rebound the
# same name to the archive map, clobbering the logger; both values now survive
# (and `__a` keeps its original final value, the map, for compatibility).
__a = logging.get_logger(__name__)
logger = __a

# Map from canonical DeBERTa-v2 checkpoint names to their hosted config files.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
__a = DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP
class A__(PretrainedConfig):
    """Configuration class for DeBERTa-v2 models.

    Defaults match microsoft/deberta-v2-xlarge.  NOTE(review): the transformed
    source gave every ``__init__`` parameter the same name (a SyntaxError) and
    renamed the ``model_type`` class attribute; both are restored from the
    attribute assignments below.
    """

    model_type = 'deberta-v2'

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler_hidden_size defaults to hidden_size unless explicitly given.
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class A__(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2.

    NOTE(review): the transformed source named all three members
    ``_lowerCAmelCase`` so two were clobbered; the OnnxConfig contract requires
    the ``inputs`` property, ``default_onnx_opset`` and
    ``generate_dummy_inputs``, restored here.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input names with their dynamic axes; token_type_ids only when used."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset the exported graph requires."""
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        """Dummy inputs for tracing; drops token_type_ids when the model has none."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
# Module-level logger for this configuration file.
__a = logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds `__a`, clobbering the logger
# created on the previous line — these were presumably meant to be two
# distinct names (a logger and a pretrained-config archive map); verify
# before any later code tries to use `__a` as a logger.
__a = {
    'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class A__ ( PretrainedConfig ):
    """Configuration class for BlenderbotSmall models.

    Defaults reproduce the ``facebook/blenderbot_small-90M`` checkpoint.
    The class-attribute names below are the hooks ``PretrainedConfig``
    reads (``model_type``, ``keys_to_ignore_at_inference``,
    ``attribute_map``); the obfuscated original bound all three to one
    shadowing placeholder.
    """

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        """Initialize the configuration; see the attribute assignments for
        the meaning of each parameter."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class A__ ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for BlenderbotSmall (seq2seq, with optional
    past-key-value caching).

    The obfuscated original bound every local to one placeholder while the
    code referenced the real names (``common_inputs``, ``decoder_inputs``,
    ...); names are restored from those reference sites.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the graph inputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache, the decoder consumes one new token at a time.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the graph outputs, per task."""
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seqaseq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy encoder+decoder inputs, including past_key_values if used."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy decoder-only inputs, including past_key_values if used."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dummy inputs built by tokenizing a synthetic batch of unk tokens."""
        # If dynamic axis (-1) we forward with a fixed dimension to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Dispatch dummy-input generation according to ``self.task``."""
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        """Flatten one past-key-value tensor into ``flattened_output``,
        delegating to the seq2seq or plain-decoder base behavior by task."""
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : int = {"vocab_file": "spiece.model"}
UpperCamelCase__ : str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
UpperCamelCase__ : Union[str, Any] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
UpperCamelCase__ : List[Any] = 0
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : Dict = 2
UpperCamelCase__ : Tuple = 3
UpperCamelCase__ : Optional[int] = 4
class __snake_case ( lowerCAmelCase__ ):
__lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[int] = 'left'
def __init__( self , _A , _A=False , _A=True , _A=False , _A="<s>" , _A="</s>" , _A="<unk>" , _A="<sep>" , _A="<pad>" , _A="<cls>" , _A="<mask>" , _A=["<eop>", "<eod>"] , _A = None , **_A , ):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = remove_space
SCREAMING_SNAKE_CASE_ = keep_accents
SCREAMING_SNAKE_CASE_ = vocab_file
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_A)
@property
def lowerCAmelCase__ ( self):
return len(self.sp_model)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ = None
return state
def __setstate__( self , _A):
SCREAMING_SNAKE_CASE_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def lowerCAmelCase__ ( self , _A):
if self.remove_space:
SCREAMING_SNAKE_CASE_ = ' '.join(inputs.strip().split())
else:
SCREAMING_SNAKE_CASE_ = inputs
SCREAMING_SNAKE_CASE_ = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
SCREAMING_SNAKE_CASE_ = unicodedata.normalize('NFKD' , _A)
SCREAMING_SNAKE_CASE_ = ''.join([c for c in outputs if not unicodedata.combining(_A)])
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ = outputs.lower()
return outputs
def lowerCAmelCase__ ( self , _A):
SCREAMING_SNAKE_CASE_ = self.preprocess_text(_A)
SCREAMING_SNAKE_CASE_ = self.sp_model.encode(_A , out_type=_A)
SCREAMING_SNAKE_CASE_ = []
for piece in pieces:
if len(_A) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
SCREAMING_SNAKE_CASE_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
SCREAMING_SNAKE_CASE_ = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_A)
else:
new_pieces.append(_A)
return new_pieces
def lowerCAmelCase__ ( self , _A):
return self.sp_model.PieceToId(_A)
def lowerCAmelCase__ ( self , _A):
return self.sp_model.IdToPiece(_A)
def lowerCAmelCase__ ( self , _A):
SCREAMING_SNAKE_CASE_ = ''.join(_A).replace(_A , ' ').strip()
return out_string
def lowerCAmelCase__ ( self , _A , _A = False , _A = None , _A = True , **_A , ):
SCREAMING_SNAKE_CASE_ = kwargs.pop('use_source_tokenizer' , _A)
SCREAMING_SNAKE_CASE_ = self.convert_ids_to_tokens(_A , skip_special_tokens=_A)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A))
SCREAMING_SNAKE_CASE_ = []
sub_texts.append(_A)
else:
current_sub_text.append(_A)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
SCREAMING_SNAKE_CASE_ = ''.join(_A)
SCREAMING_SNAKE_CASE_ = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE_ = self.clean_up_tokenization(_A)
return clean_text
else:
return text
def lowerCAmelCase__ ( self , _A , _A = None):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCAmelCase__ ( self , _A , _A = None , _A = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
if token_ids_a is not None:
return ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1, 1]
return ([0] * len(_A)) + [1, 1]
def lowerCAmelCase__ ( self , _A , _A = None):
SCREAMING_SNAKE_CASE_ = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def lowerCAmelCase__ ( self , _A , _A = None):
if not os.path.isdir(_A):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _A)
elif not os.path.isfile(self.vocab_file):
with open(_A , 'wb') as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(_A)
return (out_vocab_file,)
| 707 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Lazy-import table: submodule name -> public symbols it provides. The
# obfuscated original bound this dict (and the torch-only list) to a
# placeholder name while `_LazyModule` was handed the undefined name
# `_import_structure`; the real name is restored.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 620 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration file.
__magic_name__ = logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds `__magic_name__`, clobbering
# the logger created on the previous line — these were presumably meant to
# be two distinct names (a logger and the RoBERTa pretrained-config archive
# map); verify before any later code uses `__magic_name__` as a logger.
__magic_name__ = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    """Configuration class for RoBERTa models.

    Defaults reproduce the ``roberta-base`` checkpoint. The obfuscated
    original gave every ``__init__`` parameter the same placeholder name (a
    SyntaxError) while the body referenced the real names, which are
    restored below; the base class is the visibly-imported
    ``PretrainedConfig``.
    """

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Initialize the configuration; see the attribute assignments for
        the meaning of each parameter."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class SCREAMING_SNAKE_CASE ( OnnxConfig ):
    """ONNX export configuration for RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the graph inputs.

        The obfuscated original bound the axis dict to a placeholder while
        returning the undefined name ``dynamic_axis``; the local is restored.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 232 | """simple docstring"""
def _lowerCamelCase ( UpperCAmelCase__ = 60_08_51_47_51_43 ) -> int:
'''simple docstring'''
try:
a__ = int(UpperCAmelCase__ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
a__ = 2
a__ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
a__ = i
while n % i == 0:
a__ = n // i
i += 1
return int(UpperCAmelCase__ )
if __name__ == "__main__":
    # The obfuscated source called the undefined name `solution()`; the
    # solver defined above is `_lowerCamelCase`.
    print(f"{_lowerCamelCase() = }")
| 232 | 1 |
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import table: submodule name -> public symbols it provides. The
# obfuscated original rebound one placeholder name for the dict and every
# per-backend list, then passed the undefined name `_import_structure` to
# `_LazyModule`; the real structure is restored.
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_xglm'] = [
        'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'XGLMForCausalLM',
        'XGLMModel',
        'XGLMPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_xglm'] = [
        'FlaxXGLMForCausalLM',
        'FlaxXGLMModel',
        'FlaxXGLMPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_xglm'] = [
        'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFXGLMForCausalLM',
        'TFXGLMModel',
        'TFXGLMPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 42 |
"""simple docstring"""
from __future__ import annotations
import math
def snake_case(u: float, p: int) -> float:
    """Return u * (u-1) * ... * (u-p+1), the u-term of Newton's
    forward-interpolation formula.

    The obfuscated original declared two parameters with the same
    placeholder name (a SyntaxError) and referenced the undefined locals
    ``u`` and ``temp``; both are restored.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def snake_case() -> None:
    """Interactively read a tabulated function and evaluate it at a point
    using Newton's forward-difference interpolation.

    Reads from stdin: the number of samples, the x values, the y values,
    and the point to interpolate; prints the interpolated value.
    """

    def _ucal(u: float, p: int) -> float:
        # Local copy of the forward-interpolation u-term; the obfuscated
        # original called an undefined name here (the sibling helper was
        # renamed to the same identifier as this entry point).
        temp = u
        for k in range(1, p):
            temp = temp * (u - k)
        return temp

    n = int(input("""enter the numbers of values: """))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)

    print("""enter the values of parameters in a list: """)
    x = list(map(int, input().split()))

    print("""enter the values of corresponding parameters: """)
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("""enter the value to interpolate: """))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (_ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
    # The obfuscated source called the undefined name `main()`; the entry
    # point defined above is `snake_case`.
    snake_case()
| 42 | 1 |
"""simple docstring"""
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    """Convert a TensorFlow "token dropping" BERT checkpoint to a PyTorch `BertForMaskedLM`.

    Args:
        tf_checkpoint_path: path to the TF2 checkpoint directory.
        config_path: path to the BERT config JSON describing the architecture.
        pytorch_dump_path: directory where the converted model is saved.

    Note: the original obfuscated version discarded every loaded tensor instead
    of assigning it to the model; the assignment targets below are recovered
    from the ``.shape`` arguments that were still passed to the helpers.
    """

    def get_masked_lm_array(name: str):
        # Variables of the masked-LM head live under the `masked_lm/` prefix.
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            # TF stores dense kernels as (in, out); PyTorch Linear expects (out, in).
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        # Attention weights are stored per-head; collapse them to the 2-D/1-D
        # shape the corresponding PyTorch parameter expects.
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"""Loading model based on config from {config_path}...""")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    # The input embedding table is shared with the masked-LM head in the TF model.
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("""Model conversion was done sucessfully!""")
if __name__ == "__main__":
    # Fix: the parser/args results were assigned to a throwaway name
    # (`__snake_case`) while the code below read `parser` / `args`, and
    # dataset residue was fused onto the final line.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Fix: all five module constants were bound to the same placeholder name
# (`__snake_case`), so each rebind clobbered the previous one and the class
# below (VOCAB_FILES_NAMES, logger, ...) referenced undefined names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'camembert-base': 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '▁'
class _SCREAMING_SNAKE_CASE(PreTrainedTokenizer):
    """
    CamemBERT tokenizer backed by SentencePiece (BPE).

    A few ids (``<s>NOTUSED``, ``<pad>``, ``</s>NOTUSED``, ``<unk>`` and
    ``<mask>``) come from the original fairseq vocabulary and are laid out
    *before* the SentencePiece ids, which are shifted by ``fairseq_offset``.

    NOTE(review): the original obfuscated version assigned every attribute to
    a single placeholder local (`lowercase__`) and inherited from an undefined
    name; the attribute/method names below are reconstructed from the
    `PreTrainedTokenizer` contract and the surviving call sites.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        # <mask> goes after the whole SentencePiece vocabulary.
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model inputs: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """CamemBERT does not use token type ids: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        # fairseq specials + the SentencePiece vocabulary.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, honouring the fairseq offset."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an id back to a token (str)."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Decode a token sequence, keeping special tokens verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import plumbing: each backend's submodule is registered in
# `_import_structure` and only imported when one of its names is first
# accessed through the `_LazyModule` proxy installed at the bottom.
# Fix: the original bound every structure update to a throwaway name (`__A`),
# leaving `_import_structure` (used by `_LazyModule` below) undefined.
_import_structure = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_vit'] = ['ViTFeatureExtractor']
    _import_structure['image_processing_vit'] = ['ViTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_vit'] = [
        'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ViTForImageClassification',
        'ViTForMaskedImageModeling',
        'ViTModel',
        'ViTPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_vit'] = [
        'TFViTForImageClassification',
        'TFViTModel',
        'TFViTPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_vit'] = [
        'FlaxViTForImageClassification',
        'FlaxViTModel',
        'FlaxViTPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module object with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 126 |
'''simple docstring'''
from __future__ import annotations
# Fix: the alias and both grids were all assigned to one name (`__A`), while
# the functions and the __main__ block below read `Matrix`, `initial_grid`
# and `no_solution`.
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: list[list[int]], row: int, column: int, n: int) -> bool:
    """Return True if digit `n` may be placed at (row, column).

    Checks the row, the column, and the enclosing 3x3 box.
    (Fix: the original collapsed all four parameters to `a`, which is a
    SyntaxError, while the body already used the real names; the def name is
    restored from the call site in `sudoku`.)
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def lowerCAmelCase_ ( a : Matrix ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: list[list[int]]) -> list[list[int]] | None:
    """Solve `grid` in place by backtracking.

    Returns the (mutated) grid on success, or None when no digit fits.
    (Fix: the original assigned `row`/`column` and the cell writes to
    placeholder names, leaving the body inoperative; reconstructed from the
    surviving call sites `find_empty_location` / `is_safe`.)
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # undo the speculative placement before trying the next digit
            grid[row][column] = 0

    return None
def print_solution(grid: list[list[int]]) -> None:
    """Print the grid one row per line, cells separated by spaces.

    (Fix: the original printed the placeholder `a` for every cell instead of
    the cell value; the def name is restored from the __main__ call site.)
    """
    for row in grid:
        for cell in row:
            print(cell, end=' ')
        print()
if __name__ == "__main__":
    # NOTE: `sudoku` mutates the grid in place, so after a successful solve the
    # original grid object holds the solution (the "unmodified" grid is only
    # shown by the first print).
    # Fix: the solver result was assigned to `__A` while the check below read
    # `solution`.
    for example_grid in (initial_grid, no_solution):
        print('\nExample grid:\n' + '=' * 20)
        print_solution(example_grid)
        print('\nExample grid solution:')
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print('Cannot find a solution.')
| 126 | 1 |
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both constants were bound to the same placeholder name, so the archive
# map clobbered the logger.
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/xprophetnet-large-wiki100-cased''': (
        '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'''
    ),
}
class __snake_case(PretrainedConfig):
    """Configuration class for XLM-ProphetNet encoder-decoder models.

    NOTE(review): the original obfuscated version had every __init__ parameter
    named `__lowerCAmelCase` (a SyntaxError) and discarded all attribute
    assignments; parameter names are reconstructed from the body's right-hand
    sides and the `super().__init__` keyword arguments.
    """

    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    # `num_attention_heads` is exposed for API compatibility and maps onto the
    # encoder-specific attribute (see the num_hidden_layers property below for
    # the analogous encoder/decoder split).
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=3_0_5_2_2,
        hidden_size=1_0_2_4,
        encoder_ffn_dim=4_0_9_6,
        num_encoder_layers=1_2,
        num_encoder_attention_heads=1_6,
        decoder_ffn_dim=4_0_9_6,
        num_decoder_layers=1_2,
        num_decoder_attention_heads=1_6,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=5_1_2,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=3_2,
        relative_max_distance=1_2_8,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Total depth is the sum of encoder and decoder stacks.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'''
            ''' `num_decoder_layers`.'''
        )
| 83 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Fix: both constants were bound to the same placeholder name, so the archive
# map clobbered the logger.
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
    """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A(PretrainedConfig):
    """Configuration class for MarkupLM models (BERT-style encoder plus
    XPath tag/subscript embeddings for markup structure).

    NOTE(review): the original obfuscated version had every __init__ parameter
    named `SCREAMING_SNAKE_CASE__` (a SyntaxError) and discarded all attribute
    assignments; parameter names are reconstructed from the body's right-hand
    sides.
    """

    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=2_56,
        max_xpath_subs_unit_embeddings=10_24,
        tag_pad_id=2_16,
        subs_pad_id=10_01,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 355 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__A =logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set `module.tensor_name` (a parameter or buffer) on `device`, keeping
    bitsandbytes 8-bit / 4-bit quantized parameter classes intact.

    Args:
        module: the root module; `tensor_name` may be a dotted path into it.
        tensor_name: name (possibly dotted) of the parameter/buffer to set.
        device: target device.
        value: optional replacement tensor/value; when None the old value is moved.
        fp16_statistics: optional SCB statistics attached to int8 weights.

    NOTE(review): identifier digits were mangled in the original
    (`Paramsabit`/`IntaParams`); the correct bitsandbytes attribute names are
    recovered from the surviving `hasattr(bnb.nn, "Params4bit")` string. The
    `ConvaD` name is kept because that is how `Conv1D` is imported at the top
    of this file.
    """
    if "." in tensor_name:
        # Walk the dotted path down to the owning submodule.
        splits = tensor_name.split('''.''')
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f'{module} has no attribute {split}.')
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f'{module} does not have a parameter or a buffer named {tensor_name}.')
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device('''meta''') and device not in ["meta", torch.device('''meta''')] and value is None:
        raise ValueError(f'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, '''Params4bit''') and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to('''cpu''')
                if value.dtype == torch.int8:
                    # Serialized int8 checkpoints need a recent-enough bitsandbytes.
                    is_8bit_serializable = version.parse(importlib.metadata.version('''bitsandbytes''')) > version.parse(
                        '''0.37.2'''
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
                            '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.'''
                        )
            else:
                new_value = torch.tensor(value, device='''cpu''')

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, ConvaD) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, '''SCB''', fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """Recursively replace `nn.Linear`/`Conv1D` submodules with bitsandbytes
    8-bit or 4-bit linear layers, skipping `modules_to_not_convert`.

    Returns (model, has_been_replaced). The function name and recursion
    signature are restored from the surviving recursive call; mangled
    bitsandbytes/quantization-config attribute names (`Linearabit`,
    `llm_inta_*`, `bnb_abit_*`) are restored to their real spellings.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, ConvaD)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in '''.'''.join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, ConvaD):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public entry point: quantize all eligible linear layers of `model` with
    bitsandbytes, keeping `lm_head` (and any caller-specified modules) in full
    precision. Returns the (possibly modified) model.
    """
    modules_to_not_convert = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.'''
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias of `replace_with_bnb_linear` (name recovered from the
    deprecation message)."""
    warnings.warn(
        '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''',
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias of `set_module_quantized_tensor_to_device` (name
    recovered from the deprecation message)."""
    warnings.warn(
        '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''',
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """Return module names that must stay in full precision: tied weights and
    the model's output head (name per transformers upstream; the original was
    obfuscated to `a` and mixed `model`/`tied_model` with placeholder names).
    """
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '''''')
        filtered_module_names.append(name)

    return filtered_module_names
| 713 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import plumbing for MegatronBERT.
# Fix: both the structure dict and the torch model list were bound to `__A`,
# leaving `_import_structure` (used by `_LazyModule` below) undefined.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with the lazy proxy.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 241 | 0 |
def lowerCamelCase__(word: str, max_width: int) -> list:
    """Fully justify the words of ``word`` into lines of exactly ``max_width`` chars.

    Interior lines distribute padding as evenly as possible, with extra spaces
    going to the leftmost gaps; the last line is left-justified and padded.
    Returns the list of justified lines.
    """
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        # Total padding that must be distributed over this line.
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # A single word gets all the padding appended to it.
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # Even share per gap ...
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        # ... plus one extra space for the leftmost `remainder` gaps.
        spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for current_word in words:
        # width = total word characters so far; len(line) = minimum spaces needed.
        if width + len(current_word) + len(line) <= max_width:
            line.append(current_word)
            width += len(current_word)
        else:
            # Line is full: justify it and start a new line with this word.
            answer.append(justify(line, width, max_width))
            line, width = [current_word], len(current_word)
    # The last line is left-justified: single spaces between words, padded right.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 62 | """simple docstring"""
class lowerCamelCase:
    """Prefix-sum array supporting O(1) range-sum queries over a fixed int list."""

    def __init__(self, array: list) -> None:
        len_array = len(array)
        # prefix_sum[i] = array[0] + ... + array[i]
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Return the sum of array[start..end] inclusive."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous sub-array sums to *target_sum*."""
        # Classic two-sum on prefix sums: sub-array (i, j] sums to target
        # iff prefix[j] - target equals some earlier prefix (0 = empty prefix).
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 159 | 0 |
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are exempt from the stale-bot lifecycle.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    """Run one stale-bot pass over open issues in huggingface/diffusers.

    For each open issue: close it 7+ days after a stale notification with no
    further activity; un-stale it if a human commented after the bot; otherwise
    post a stale notification after 23+ days of inactivity.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 720 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _lowerCAmelCase(ModelMixin, ConfigMixin):
    """T5-style FiLM-conditioned decoder that maps noisy continuous inputs plus a
    diffusion noise time to spectrogram frames (diffusers' T5FilmDecoder)."""

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2_048,
        dropout_rate: float = 0.1,
    ) -> None:
        super().__init__()
        # MLP that turns the timestep embedding into a (d_model * 4) FiLM conditioning vector.
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )
        self.position_encoding = nn.Embedding(targets_length, d_model)
        # Positional table is fixed, not learned (matches upstream T5FilmDecoder).
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """Outer product of a query mask and a key mask -> cross-attention mask."""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """Decode spectrogram frames.

        encodings_and_masks: iterable of (encoder_hidden_states, encoder_mask) pairs.
        decoder_input_tokens: (batch, seq, input_dims) continuous inputs.
        decoder_noise_time: (batch,) noise times in [0, 1).
        """
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y_mask)) for x, y_mask in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    """One FiLM-conditioned T5 decoder layer: cond self-attn, cross-attn, cond FF."""

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            # Convert the 0/1 mask to additive form: masked positions get -1e10.
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    """Pre-norm self-attention with optional FiLM conditioning on the normed input."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block with residual connection.
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class TaLayerCrossAttention(nn.Module):
    """Pre-norm cross-attention over encoder key/value states, with residual."""

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            # drop the broadcast head dim the caller's mask carries
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    """Pre-norm gated feed-forward with optional FiLM conditioning, plus residual."""

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    """T5-style gated-GELU MLP: (gelu(wi_0 x) * wi_1 x) -> dropout -> wo."""

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        # Two parallel input projections: gate branch (through GELU) and linear branch.
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    """T5-style RMS layer norm: scale only, no mean subtraction, no bias."""

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """Tanh approximation of GELU as used in Google BERT / GPT (the "new" GELU)."""

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    """FiLM conditioning: project the conditioning embedding to per-feature
    (scale, shift) and apply x * (1 + scale) + shift."""

    def __init__(self, in_features, out_features):
        super().__init__()
        # Single projection producing scale and shift concatenated on the last dim.
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 279 | 0 |
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __lowerCAmelCase(FlaxModelTesterMixin, unittest.TestCase):
    """Tests for FlaxAutoencoderKL; the shared mixin drives the common checks."""

    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        # NOTE(review): method name follows the diffusers Flax tester-mixin
        # convention — confirm against FlaxModelTesterMixin's expectations.
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 695 |
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    """Interest accrued with simple (non-compounding) daily interest.

    Raises ValueError for non-positive days/principal or a negative rate.
    """
    if days_between_payments <= 0:
        raise ValueError('days_between_payments must be > 0')
    if daily_interest_rate < 0:
        raise ValueError('daily_interest_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Interest earned (not the final balance) under compound interest."""
    if number_of_compounding_periods <= 0:
        raise ValueError('number_of_compounding_periods must be > 0')
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError('nominal_annual_interest_rate_percentage must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """Interest earned at an annual percentage rate compounded daily (365/yr)."""
    if number_of_years <= 0:
        raise ValueError('number_of_years must be > 0')
    if nominal_annual_percentage_rate < 0:
        raise ValueError('nominal_annual_percentage_rate must be >= 0')
    if principal <= 0:
        raise ValueError('principal must be > 0')
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 695 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files',
    [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict.from_directory reads the size from the README.md YAML
    header or, for backward compatibility, from dataset_infos.json."""
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info',
    [
        DatasetInfo(),
        DatasetInfo(
            description='foo',
            features=Features({'a': Value('int32')}),
            builder_name='builder',
            config_name='config',
            version='1.0.0',
            splits=[{'name': 'train'}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    """A DatasetInfo written to a directory reloads equal to the original."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    """_to_yaml_dict keeps exactly the whitelisted fields, each YAML-serializable,
    and the result round-trips through YAML text."""
    dataset_info = DatasetInfo(
        description='foo',
        citation='bar',
        homepage='https://foo.bar',
        license='CC0',
        features=Features({'a': Value('int32')}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name='builder',
        config_name='config',
        version='1.0.0',
        splits=[{'name': 'train', 'num_examples': 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    # round-trip through serialized YAML
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """A default-constructed DatasetInfo yields an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict',
    [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()}),
        DatasetInfosDict({'my_config_name': DatasetInfo()}),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo',
                    features=Features({'a': Value('int32')}),
                    builder_name='builder',
                    config_name='config',
                    version='1.0.0',
                    splits=[{'name': 'train'}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42),
                'v2': DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    """DatasetInfosDict round-trips through its on-disk README.md representation."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, 'README.md'))
| 646 |
'''simple docstring'''
# Precomputed fifth powers of each decimal digit, keyed by the digit character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of *number*."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Project Euler 30: sum of all numbers that equal the sum of the fifth
    powers of their digits (1-digit trivial cases excluded; 6*9^5 bounds the search)."""
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 646 | 1 |
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    """Factory used by argparse `func=` default: no accelerate config file."""
    return EnvironmentCommand()


def download_command_factory(args):
    """Factory used when `--accelerate-config_file` was parsed."""
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    """`transformers-cli env`: collect and print environment info for bug reports."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the `env` subcommand and its arguments to *parser*."""
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file=None, *args):
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        """Probe optional dependencies, print a fill-in report, return the info dict."""
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f'\t{accelerate_config}'
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f'{safetensors_version}',
            "Accelerate version": f'{accelerate_version}',
            "Accelerate config": f'{accelerate_config_str}',
            "PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
            "Tensorflow version (GPU?)": f'{tf_version} ({tf_cuda_available})',
            "Flax version (CPU?/GPU?/TPU?)": f'{flax_version} ({jax_backend})',
            "Jax version": f'{jax_version}',
            "JaxLib version": f'{jaxlib_version}',
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        """Render a dict as a markdown-ish bullet list."""
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()]) + "\n"
| 665 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Project Euler 94: sum of perimeters of almost-equilateral triangles
    (sides a, a, a±1) with integral area, up to *max_perimeter*.

    Candidate side lengths follow a linear recurrence; each step yields the
    next perimeter, alternating between the (a, a, a+1) and (a, a, a-1) families.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        # Advance the recurrence that generates the next candidate triangle.
        prev_value += 2 * value
        value += prev_value
        # Alternate +2 / -2 to switch between the two triangle families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 665 | 1 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

# Config class name referenced in docstrings (NOTE(review): reconstructed
# conventional name — confirm against the original module).
_CONFIG_FOR_DOC = """T5Config"""
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
UpperCAmelCase : Tuple = jnp.zeros_like(_lowercase )
UpperCAmelCase : int = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
UpperCAmelCase : Any = shifted_input_ids.at[:, 0].set(_lowercase )
UpperCAmelCase : Tuple = jnp.where(shifted_input_ids == -1_0_0 , _lowercase , _lowercase )
return shifted_input_ids
class FlaxMTaModel(FlaxTaModel):
    """MT5 model: identical architecture to T5, only the config class differs."""

    model_type = 'mt5'
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    """Encoder-only MT5 variant."""

    model_type = 'mt5'
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    """MT5 with the LM head for conditional generation."""

    model_type = 'mt5'
    config_class = MTaConfig
| 717 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy import structure: submodule name -> list of public names it defines.
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: also expose the PyTorch model classes.
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps are loaded on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 0 |
'''simple docstring'''
__UpperCAmelCase = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def _snake_case ( A ) -> str:
assert type(A ) in (int, float) and decimal == int(A )
lowerCAmelCase__ = int(A )
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = False
if decimal < 0:
lowerCAmelCase__ = True
decimal *= -1
while decimal > 0:
lowerCAmelCase__ , lowerCAmelCase__ = divmod(A , 16 )
lowerCAmelCase__ = values[remainder] + hexadecimal
lowerCAmelCase__ = '''0x''' + hexadecimal
if negative:
lowerCAmelCase__ = '''-''' + hexadecimal
return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCamelCase__ = {
"configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 624 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
_snake_case : List[Any] = pd.read_csv("""sample_data.csv""", header=None)
_snake_case : Optional[int] = df.shape[:1][0]
# If you're using some other dataset input the target column
_snake_case : Union[str, Any] = df.iloc[:, 1:2]
_snake_case : Optional[int] = actual_data.values.reshape(len_data, 1)
_snake_case : Dict = MinMaxScaler().fit_transform(actual_data)
_snake_case : Any = 10
_snake_case : Any = 5
_snake_case : str = 20
_snake_case : Union[str, Any] = len_data - periods * look_back
_snake_case : Any = actual_data[:division]
_snake_case : List[Any] = actual_data[division - look_back :]
_snake_case : Tuple = [], []
_snake_case : List[Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
_snake_case : int = np.array(train_x)
_snake_case : Optional[int] = np.array(test_x)
_snake_case : Tuple = np.array([list(i.ravel()) for i in train_y])
_snake_case : Tuple = np.array([list(i.ravel()) for i in test_y])
_snake_case : List[str] = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
_snake_case : str = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
_snake_case : Any = model.predict(x_test) | 700 |
"""Generate the Hamming number series: numbers of the form 2^i * 3^j * 5^k."""


def hamming(n_element: int) -> list:
    """Return the first ``n_element`` Hamming numbers in ascending order.

    :param n_element: how many terms to produce; must be >= 1.
    :raises ValueError: if ``n_element`` is less than 1.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest element whose multiple by 2, 3, 5 is still unused.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        # The next Hamming number is the smallest unused multiple.
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
def catalan_numbers(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0)..C(upper_limit) via dynamic programming.

    :param upper_limit: last index of the sequence to compute; must be >= 0.
    :raises ValueError: if ``upper_limit`` is negative.
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
| 15 |
"""Lazy import shim for the Blenderbot model family: heavy backend modules are
only imported on first attribute access via `_LazyModule`."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Map of submodule name -> public symbols; consumed by `_LazyModule` below.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module below is used.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so backends load only when accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 212 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_a = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for
    training and evaluation (TabFact-style table entailment)."""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Either a hub dataset name or local train/validation files must be provided,
        # and local files must be csv or json with matching extensions.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    """Fine-tune and evaluate a TAPEX (BART) sequence classifier on TabFact-style
    table-entailment data, then optionally predict and push/report results."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a hub dataset (downloaded automatically) or local csv/json files.
    # For JSON files, this script will use the `question` column for the input question and
    # `table` column for the corresponding table.
    # In distributed training, load_dataset guarantees only one local process downloads the data.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files. CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer. In distributed training, from_pretrained
    # guarantees only one local process downloads model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # Rows are '\n'-separated, cells are '#'-separated; first row is the header.
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object
    # (a namedtuple with a predictions and label_ids field) and returns a dict of string -> float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for TPU multiprocessing (xla_spawn); `index` is the process index."""
    main()


if __name__ == "__main__":
    main()
| 706 |
from math import sqrt
def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime, by trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status
def sieve_er(n: int) -> list:
    """Return all primes in [2, n] via the sieve of Eratosthenes (zeroing composites)."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.

    # actual sieve of erathostenes: zero out every multiple of a surviving number.
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n], testing each candidate with is_prime."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def prime_factorization(number: int) -> list:
    """Return the prime factors of ``number`` (with multiplicity), ascending.

    For 0 and 1 (and any prime) the result is ``[number]`` itself.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans
def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans
def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0
def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0
def goldbach(number: int) -> list:
    """Return two distinct primes whose sum is the even ``number`` > 2 (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans
def gcd(number1: int, number2: int) -> int:
    """Return the greatest common divisor of two non-negative ints (Euclid's algorithm)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1
def kg_v(number1: int, number2: int) -> int:
    """Return the least common multiple (German: kgV) of two positive ints,
    built from the union of their prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                # shared factor: take the higher multiplicity
                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans
def get_prime(n: int) -> int:
    """Return the n-th prime, 0-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans
def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return the primes strictly between two primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans
def get_divisors(n: int) -> list:
    """Return all positive divisors of n (including 1 and n), ascending."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans
def lowerCAmelCase__(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors (perfect number).

    Relies on the module-level ``get_divisors`` helper.
    Fix: the body referenced undefined ``number`` while the parameter was named
    differently, and the type check was ``isinstance(number, number)``.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition: helper returns the full, ordered divisor list
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number
def lowerCAmelCase__(numerator: int, denominator: int) -> tuple:
    """Return the fraction ``numerator/denominator`` reduced to lowest terms.

    Relies on the module-level ``gcd`` helper.
    Fix: the original had two parameters sharing one name (SyntaxError) and
    ``isinstance`` calls with non-type second arguments.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition: gcd must evenly divide both parts
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase__(n: int) -> int:
    """Return n! (factorial of ``n``), with 0! == 1.

    Fix: the body used undefined ``n`` while the parameter was named
    differently, and the type check was ``isinstance(n, n)``.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans
def lowerCAmelCase__(n: int) -> int:
    """Return the ``n``-th Fibonacci number of the sequence 1, 1, 2, 3, 5, 8, ...

    ``n == 0`` and ``n == 1`` both return 1.
    Fix: the body used undefined ``n``/``fiba`` locals and a broken
    ``isinstance(n, n)`` check.
    """
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 29 | 0 |
'''simple docstring'''
from string import ascii_uppercase
# Maps the decimal value of each letter digit, as a string (e.g. "10"), to the
# uppercase letter used for it in bases above 10 ("A" == 10, ..., "Z" == 35).
UpperCamelCase__: dict = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a non-negative decimal integer to its string representation in ``base``.

    Args:
        num: non-negative integer to convert.
        base: target base, 2..36 (digits above 9 use letters from ALPHABET_VALUES).

    Raises:
        TypeError: if ``num`` or ``base`` is a float, or ``base`` is a str.
        ValueError: if ``num`` is negative or ``base`` is outside 2..36.

    Fix: the original had two parameters sharing one name (SyntaxError) and its
    ``isinstance`` checks lost their type arguments; it is renamed to the name
    the ``__main__`` self-test below calls.
    """
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 are written as letters (10 -> "A", ..., 35 -> "Z")
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # exhaustive round-trip check: the base-N string must parse back to the number
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
| 127 |
'''simple docstring'''
from __future__ import annotations
def snake_case_(nums: list[int]) -> int:
    """Return the maximum sum of non-adjacent elements of ``nums`` (house-robber).

    Returns 0 for an empty list.
    Fix: the obfuscated body referenced undefined names ``nums`` /
    ``max_excluding`` while its locals were named differently.
    """
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that skips the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 127 | 1 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Compute ROUGE between two line-aligned text files.

    Args:
        pred_path: file with one prediction per line.
        tgt_path: file with one reference per line (truncated to len(preds)).
        save_path: optional JSON path to write the metrics to.
        **rouge_kwargs: forwarded to ``calculate_rouge``.

    Fix: the original had three parameters sharing one name (SyntaxError); it is
    renamed to the name the ``fire.Fire`` entry point below calls.
    """
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
| 700 |
'''simple docstring'''
class TrieNode:
    """A node of a character trie; the root node represents the empty prefix.

    Fixes: the obfuscated class assigned to locals instead of ``self.nodes`` /
    ``self.is_leaf``, and all four methods shared one name so later defs
    shadowed earlier ones.  The class and methods are renamed to the names the
    module-level helpers below actually call.
    """

    def __init__(self) -> None:
        # child nodes keyed by character; leaf flag marks end of a stored word
        self.nodes: dict[str, "TrieNode"] = {}
        self.is_leaf = False

    def insert_many(self, words: list) -> None:
        """Insert every word in ``words``."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word, creating nodes as needed."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True iff ``word`` was inserted (prefixes alone don't match)."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove ``word`` from the trie, pruning now-empty branches."""

        def _delete(curr: "TrieNode", word: str, index: int) -> bool:
            # returns True when the child link to `curr` can be removed
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)
def print_words(node: "TrieNode", word: str) -> None:
    """Print (space-separated) every word stored under ``node``, prefixed by ``word``.

    Fixes: the original had two parameters sharing one name (SyntaxError) and
    recursed on the parent node instead of the child (infinite recursion).
    """
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        # descend into the child node, extending the accumulated prefix
        print_words(value, word + key)
def test_trie() -> bool:
    """Self-test for TrieNode: insert, find, and delete a fixed word list.

    Fix: the obfuscated body referenced undefined ``words`` and passed an
    undefined name into ``find``; renamed to the name its callers below use.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def print_results(msg: str, passes: bool) -> None:
    """Print ``msg`` followed by a pass/fail suffix.

    Fix: the original had two parameters sharing one name (SyntaxError) and the
    body read undefined ``passes``; renamed to the name the entry point calls.
    """
    print(str(msg), "works!" if passes else "doesn't work :(")
def lowercase__():
    """Run the trie self-test and fail loudly if it does not pass."""
    passed = test_trie()
    assert passed
def lowercase__():
    """Entry point: run the trie self-test and print the outcome."""
    passed = test_trie()
    print_results("Testing trie functionality", passed)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this module (the
    # entry point above is named `lowercase__`), so running this file as a
    # script would raise NameError — confirm the intended entry point.
    main()
| 508 | 0 |
"""simple docstring"""
def lowercase__(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Args:
        density: fluid density in kg/m^3, must be > 0.
        bulk_modulus: bulk modulus in Pa, must be > 0.

    Raises:
        ValueError: if either argument is non-positive.

    Fix: the original had two parameters sharing one name (SyntaxError).
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 49 |
def a(input_num: int) -> int:
    """Return the sum of the proper divisors of ``input_num`` (excluding itself).

    Raises:
        ValueError: if the input is not an int or is not positive.

    Fix: the body referenced undefined ``input_num`` while the parameter was
    named differently, and the type check was ``isinstance(x, x)``.
    """
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    # proper divisors never exceed input_num // 2
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 97 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Lazy-import structure for the ViT-MAE model; keys are submodules, values the
# public names they export.  Fix: the obfuscated version overwrote the dict
# with plain lists and never registered the module in sys.modules.
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # replace this module with a lazy proxy that imports submodules on demand
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class UpperCAmelCase(Dataset):
    """Map-style dataset that applies ``process`` (with ``params``) to each item.

    Fixes: the base class name was obfuscated away (the torch ``Dataset``
    imported above is the intended base) and ``__getitem__`` read an undefined
    index ``i`` while assigning its results to throwaway locals.
    """

    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class UpperCAmelCase(IterableDataset):
    """Iterable that applies ``infer`` to each item of ``loader``, optionally
    unrolling model batches of size ``loader_batch_size`` into single items.

    Fixes: the obfuscated version lost the ``__next__`` / ``loader_batch_item``
    method names (both became one duplicate name, breaking the iterator
    protocol), dropped the dict-key writes when unbatching, and lost the
    ``ModelOutput`` / ``tuple`` type checks.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping for unrolling a batch item by item
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        """Return the current element of the stored batch as a batch of size 1."""
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is a simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it looked like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class UpperCAmelCase(PipelineIterator):
    """Iterator that flattens sub-iterators produced by ``infer`` into one stream.

    NOTE(review): the base class name was obfuscated to an undefined
    identifier; ``PipelineIterator`` (the iterator class defined above) is the
    intended parent — confirm against the surrounding module's naming.
    """

    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator None means we haven't started a `preprocess` iterator, so start one.
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start lookig at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class UpperCAmelCase(PipelineIterator):
    """Iterator that regroups flattened items back into their original
    ``process`` boundaries, using the ``is_last`` flag on each item.

    NOTE(review): the base class name was obfuscated to an undefined
    identifier; ``PipelineIterator`` is the intended parent — confirm against
    the surrounding module's naming.  The obfuscated ``__next__`` had also
    lost its name, breaking the iterator protocol.
    """

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`
        # That is because everything is flattened by `PipelineChunkIterator` we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # its a `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class UpperCAmelCase(Dataset):
    """Dataset view that yields ``item[key]`` for each item of the wrapped dataset.

    Fix: the base class name was obfuscated away; the torch ``Dataset``
    imported above is the intended base, and ``__getitem__`` read an undefined
    index ``i``.
    """

    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class UpperCAmelCase(Dataset):
    """Dataset view yielding ``{"text": item[key1], "text_pair": item[key2]}``.

    Fix: the base class name was obfuscated away (torch ``Dataset`` is the
    intended base) and ``__init__`` had three parameters sharing one name
    (SyntaxError) while the body read ``keya`` twice.
    """

    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module state for the library-wide logging setup.
# Fix: all five globals were obfuscated to one name (`__snake_case`) and kept
# overwriting each other; the functions below read them under these names.
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None  # set lazily by _configure_library_root_logger

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True  # whether tqdm progress bars are enabled
def _get_default_logging_level():
    """Return the default log level, honoring the TRANSFORMERS_VERBOSITY env var.

    Fix: the obfuscated code passed an undefined placeholder to ``os.getenv``
    and named every helper ``_A`` while call sites use the original names.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    """Return the root package name of this library."""
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    """Return the root logger of this library."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Attach the default stderr handler to the library root logger (idempotent).

    Fix: the obfuscated code assigned the handler to a throwaway local instead
    of the module-global ``_default_handler`` and lost ``propagate = False``.
    """
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Detach the default handler and reset the library root logger level."""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Return the mapping of level names to logging constants."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger (the library root logger when ``name`` is None).

    Fix: the obfuscated body read an undefined placeholder instead of ``name``.
    """
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library root logger.

    Fix: the obfuscated body passed an undefined placeholder to ``setLevel``.
    """
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set verbosity to INFO."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set verbosity to WARNING."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set verbosity to DEBUG."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set verbosity to ERROR."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Detach the library's default stderr handler from the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Re-attach the library's default stderr handler to the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a custom handler to the library root logger.

    Fix: the obfuscated body passed an undefined placeholder instead of ``handler``.
    """
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove a previously added handler from the library root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers.

    Fix: the obfuscated body assigned False to a throwaway local instead of
    ``propagate``.
    """
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Let library log records propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Apply an explicit "[LEVEL|file:line] time >> message" format to all handlers."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset every handler of the library root logger to the default format."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like ``Logger.warning``, but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
# Attach the advisory-warning helper as a Logger method.
# Fix: the obfuscated assignments to `__snake_case` dropped the
# `logging.Logger.<name> = ...` targets and the lru_cache size argument.
logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique call signature (cached forever)."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once
class EmptyTqdm:
    """Dummy tqdm that is a no-op (used when progress bars are disabled).

    Fix: the obfuscated ``__init__`` assigned to a throwaway local instead of
    ``self._iterator``, and the class name callers use (``EmptyTqdm``) is restored.
    """

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any missing attribute."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Callable proxy that returns a real tqdm bar or an EmptyTqdm depending on
    the module-global ``_tqdm_active`` flag.

    Fix: the obfuscated version lost the ``set_lock`` / ``get_lock`` method
    names (breaking tqdm API compatibility) and the ``self._lock`` assignment;
    the class name its instantiation below uses is restored.
    """

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
# Public tqdm entry point; fix: the obfuscated assignment lost the `tqdm` name.
tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return True if tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars globally (here and in huggingface_hub).

    Fix: the obfuscated body assigned True to a throwaway local instead of the
    module-global ``_tqdm_active``.
    """
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Disable tqdm progress bars globally (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 658 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    """Helper that builds small GPT-J configs/inputs and checks cached decoding.

    Fixes: the obfuscated ``__init__`` had every parameter sharing one name
    (SyntaxError) and assigned configuration values to throwaway locals instead
    of ``self``; all four methods shared one name.  The class and method names
    used by the test case below (``FlaxGPTJModelTester``,
    ``prepare_config_and_inputs``, ``check_use_cache_forward``, ...) are restored.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        # special tokens use the last vocab id
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Build a tiny GPTJConfig plus random input ids and attention mask."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Decoding with an init_cache'd past must match the uncached forward."""
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )
        outputs = model(input_ids)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same as above, but with a zero-padded cache-length attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase, unittest.TestCase ):
UpperCAmelCase__ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
UpperCAmelCase__ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def A_ ( self : Optional[Any] ) -> Tuple:
lowerCamelCase__ : Optional[Any] = FlaxGPTJModelTester(self )
def A_ ( self : Tuple ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Union[str, Any] ) -> Tuple:
for model_class_name in self.all_model_classes:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@tooslow
def A_ ( self : Dict ) -> Tuple:
lowerCamelCase__ : str = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
lowerCamelCase__ : Dict = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=UpperCAmelCase , truncation=UpperCAmelCase )
lowerCamelCase__ : str = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Optional[int] = model.config.eos_token_id
lowerCamelCase__ : Union[str, Any] = jax.jit(model.generate )
lowerCamelCase__ : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCamelCase__ : Optional[int] = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
lowerCamelCase__ : int = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@is_pt_flax_cross_test
def A_ ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCamelCase__ : Dict = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCamelCase__ : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCamelCase__ : Union[str, Any] = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Tuple = pt_inputs['input_ids'].shape
lowerCamelCase__ : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : Optional[int] = 1
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Any = 1
lowerCamelCase__ : Dict = pt_model_class(UpperCAmelCase ).eval()
lowerCamelCase__ : str = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowerCamelCase__ : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase )
lowerCamelCase__ : Tuple = fx_state
with torch.no_grad():
lowerCamelCase__ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCamelCase__ : Dict = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase )
lowerCamelCase__ : Tuple = model_class.from_pretrained(UpperCAmelCase , from_pt=UpperCAmelCase )
lowerCamelCase__ : int = fx_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
    @is_pt_flax_cross_test
    def A_ ( self : Union[str, Any] ) -> int:
        """Check Flax->PyTorch weight loading: outputs must match both in-memory
        and after a save_pretrained / from_pretrained(from_flax=True) round trip.

        NOTE(review): the assignment targets here are mangled (``lowerCamelCase__``)
        while later statements read ``pt_inputs``, ``fx_model``, ``pt_model`` etc.,
        and every call argument is the undefined ``UpperCAmelCase`` — as written
        this would not run; confirm against the original cross-framework test.
        """
        lowerCamelCase__ , lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                lowerCamelCase__ : Tuple = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
                lowerCamelCase__ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                lowerCamelCase__ : Union[str, Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
                lowerCamelCase__ : int = getattr(UpperCAmelCase , UpperCAmelCase )
                lowerCamelCase__ : List[Any] = pt_model_class(UpperCAmelCase ).eval()
                lowerCamelCase__ : str = model_class(UpperCAmelCase , dtype=jnp.floataa )
                lowerCamelCase__ : List[str] = load_flax_weights_in_pytorch_model(UpperCAmelCase , fx_model.params )
                lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = pt_inputs['input_ids'].shape
                # Randomize per-sample prefix lengths and force a fixed attention
                # pattern at the chosen boundary so padding handling is exercised.
                lowerCamelCase__ : Dict = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                for batch_idx, start_index in enumerate(UpperCAmelCase ):
                    lowerCamelCase__ : List[Any] = 0
                    lowerCamelCase__ : Tuple = 1
                    lowerCamelCase__ : Optional[Any] = 0
                    lowerCamelCase__ : List[str] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    lowerCamelCase__ : Any = pt_model(**UpperCAmelCase ).to_tuple()
                lowerCamelCase__ : List[Any] = fx_model(**UpperCAmelCase ).to_tuple()
                self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
                # Compare only the last position within a 4e-2 tolerance.
                for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(UpperCAmelCase )
                    lowerCamelCase__ : Tuple = pt_model_class.from_pretrained(UpperCAmelCase , from_flax=UpperCAmelCase )
                with torch.no_grad():
                    lowerCamelCase__ : Optional[int] = pt_model_loaded(**UpperCAmelCase ).to_tuple()
                self.assertEqual(
                    len(UpperCAmelCase ) , len(UpperCAmelCase ) , 'Output lengths differ between Flax and PyTorch' )
                for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
    @tooslow
    def A_ ( self : List[str] ) -> List[Any]:
        """Smoke test: load the published GPT-J checkpoint for every model class
        and run a single-token forward pass.

        NOTE(review): the assignment targets are mangled (``lowerCamelCase__``)
        while the following lines read ``model`` and ``UpperCAmelCase`` — as
        written this would raise NameError; confirm against the original test.
        """
        for model_class_name in self.all_model_classes:
            lowerCamelCase__ : Union[str, Any] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
            lowerCamelCase__ : Any = model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCAmelCase )
| 295 | 0 |
"""simple docstring"""
import json
import sys
def UpperCamelCase_ ( input_json_file , output_md_file ) -> None:
    """Render a benchmark-results JSON file as a collapsible Markdown report.

    The input JSON maps benchmark names to ``{metric: {"new": x, "old": y,
    "diff": z}}`` mappings, where ``old`` and ``diff`` are optional.  One
    Markdown table per benchmark is written into ``output_md_file``, all
    wrapped in a ``<details>`` block.

    Fixes over the previous revision: the two parameters shared one name
    (``lowerCAmelCase__`` twice — a SyntaxError), every local assignment was
    bound to ``lowerCAmelCase_`` while later lines read the intended names,
    and the ``-> Any`` annotation referenced ``typing.Any`` without importing
    it.
    """
    with open(input_json_file , encoding='utf-8' ) as f:
        results = json.load(f )
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        # Only the file name (not the full repo path) is shown in the heading.
        benchmark_file_name = benchmark_name.split('/' )[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}" )
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old' , None )
            dif_val = metric_vals.get('diff' , None )
            # Non-numeric entries are rendered literally as "None".
            val_str = f" {new_val:f}" if isinstance(new_val , (int, float) ) else 'None'
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>' )
    with open(output_md_file , 'w' , encoding='utf-8' ) as f:
        f.writelines('\n'.join(output_md ) )
if __name__ == "__main__":
    # CLI usage: python <script> <input_results.json> <output.md>
    lowercase__ : int = sys.argv[1]
    lowercase__ : Tuple = sys.argv[2]
    # NOTE(review): both argv values are bound to ``lowercase__`` while the
    # call below reads ``input_json_file`` / ``output_md_file`` and a function
    # named ``format_json_to_md`` that is not defined in this file — the
    # identifiers look machine-mangled; confirm against the original script.
    format_json_to_md(input_json_file, output_md_file)
| 317 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ : List[str] = logging.get_logger(__name__)
class UpperCamelCase__ ( lowercase_ ):
    """Image processor: optional resize, center crop, rescale and normalize.

    NOTE(review): several signatures below repeat the parameter name
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError in Python), ``__init__`` binds
    configuration to local ``lowerCAmelCase_`` names instead of ``self.``
    attributes, and bodies read names (``size``, ``images``, ...) that no
    parameter binds — the identifiers look machine-mangled; confirm every
    detail against the original image-processor implementation.
    """
    # Keys this processor emits in its BatchFeature output.
    _SCREAMING_SNAKE_CASE = ["""pixel_values"""]
    def __init__( self , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, int]] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Union[int, float] = 1 / 2_5_5 , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , **SCREAMING_SNAKE_CASE_ : List[Any] , ):
        # Store defaults: 224x224 resize and 224x224 center crop, ImageNet stats.
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Union[str, Any] = size if size is not None else {'height': 2_2_4, 'width': 2_2_4}
        lowerCAmelCase_ : Tuple = get_size_dict(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Optional[int] = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
        lowerCAmelCase_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ , default_to_square=SCREAMING_SNAKE_CASE_ , param_name='crop_size' )
        lowerCAmelCase_ : Any = do_resize
        lowerCAmelCase_ : Dict = do_rescale
        lowerCAmelCase_ : int = do_normalize
        lowerCAmelCase_ : int = do_center_crop
        lowerCAmelCase_ : Any = crop_size
        lowerCAmelCase_ : Any = size
        lowerCAmelCase_ : str = resample
        lowerCAmelCase_ : Any = rescale_factor
        lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        lowerCAmelCase_ : List[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    # Resize one image; `size` may give a shortest edge or explicit height/width.
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
        lowerCAmelCase_ : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE_ )
        if "shortest_edge" in size:
            lowerCAmelCase_ : List[str] = get_resize_output_image_size(SCREAMING_SNAKE_CASE_ , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE_ )
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            lowerCAmelCase_ : List[str] = (size['height'], size['width'])
        else:
            raise ValueError(F"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}" )
        return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    # Center-crop one image to an explicit height/width.
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Dict[str, int] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any , ):
        lowerCAmelCase_ : str = get_size_dict(SCREAMING_SNAKE_CASE_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(SCREAMING_SNAKE_CASE_ , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    # Multiply pixel values by a scalar factor (e.g. 1/255).
    def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Any ):
        return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    # Channel-wise normalize with a mean and standard deviation.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : np.ndarray , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Union[float, List[float]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE_ : Optional[int] , ):
        return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
    # Full preprocessing pipeline: resize -> center-crop -> rescale -> normalize,
    # returning a BatchFeature with "pixel_values".
    def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE_ : ImageInput , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Dict[str, int] = None , SCREAMING_SNAKE_CASE_ : PILImageResampling = None , SCREAMING_SNAKE_CASE_ : bool = None , SCREAMING_SNAKE_CASE_ : int = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[float] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ : List[str] , ):
        # Per-call arguments fall back to the instance-level configuration.
        lowerCAmelCase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
        lowerCAmelCase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
        lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
        lowerCAmelCase_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowerCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size
        lowerCAmelCase_ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE_ , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase_ : Dict = resample if resample is not None else self.resample
        lowerCAmelCase_ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCAmelCase_ : Any = image_mean if image_mean is not None else self.image_mean
        lowerCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std
        lowerCAmelCase_ : Dict = size if size is not None else self.size
        lowerCAmelCase_ : Optional[int] = get_size_dict(SCREAMING_SNAKE_CASE_ )
        if not is_batched(SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase_ : str = [images]
        if not valid_images(SCREAMING_SNAKE_CASE_ ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        # Validate that every enabled step has the parameters it needs.
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        # All transformations expect numpy arrays.
        lowerCAmelCase_ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_resize:
            lowerCAmelCase_ : int = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_center_crop:
            lowerCAmelCase_ : List[Any] = [self.center_crop(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_rescale:
            lowerCAmelCase_ : Optional[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images]
        if do_normalize:
            lowerCAmelCase_ : Tuple = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images]
        lowerCAmelCase_ : Dict = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images]
        lowerCAmelCase_ : Union[str, Any] = {'pixel_values': images}
        return BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
| 317 | 1 |
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase_ :
    """Builder of Nezha configs and dummy inputs plus per-model-head checks.

    NOTE(review): ``__init__`` repeats the parameter name
    ``__SCREAMING_SNAKE_CASE`` (a SyntaxError), and throughout the class the
    assignment targets are mangled to ``lowerCAmelCase`` while later lines
    read the intended names (``self.parent``, ``model``, ``result``, ...) —
    as written this class is not runnable; confirm every detail against the
    original ``NezhaModelTester``.
    """
    # Store the test-harness handle and all model hyper-parameters.
    def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=13 , __SCREAMING_SNAKE_CASE=7 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=99 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=37 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.0_2 , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , ) ->Optional[Any]:
        lowerCAmelCase = parent
        lowerCAmelCase = batch_size
        lowerCAmelCase = seq_length
        lowerCAmelCase = is_training
        lowerCAmelCase = use_input_mask
        lowerCAmelCase = use_token_type_ids
        lowerCAmelCase = use_labels
        lowerCAmelCase = vocab_size
        lowerCAmelCase = hidden_size
        lowerCAmelCase = num_hidden_layers
        lowerCAmelCase = num_attention_heads
        lowerCAmelCase = intermediate_size
        lowerCAmelCase = hidden_act
        lowerCAmelCase = hidden_dropout_prob
        lowerCAmelCase = attention_probs_dropout_prob
        lowerCAmelCase = max_position_embeddings
        lowerCAmelCase = type_vocab_size
        lowerCAmelCase = type_sequence_label_size
        lowerCAmelCase = initializer_range
        lowerCAmelCase = num_labels
        lowerCAmelCase = num_choices
        lowerCAmelCase = scope
    # Build random input ids, optional masks/token-type ids, labels and a config.
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase = None
        if self.use_input_mask:
            lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase = None
        if self.use_token_type_ids:
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase = None
        lowerCAmelCase = None
        lowerCAmelCase = None
        if self.use_labels:
            lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # A NezhaConfig populated from the tester's hyper-parameters.
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
    # Same as prepare_config_and_inputs, but adds encoder states/mask for
    # exercising the model as a decoder.
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        (
            (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) ,
        ) = self.prepare_config_and_inputs()
        lowerCAmelCase = True
        lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    # Base model forward: checks last_hidden_state and pooler_output shapes.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->int:
        lowerCAmelCase = NezhaModel(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    # Decoder-mode forward with (and without) cross-attention inputs.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) ->List[Any]:
        lowerCAmelCase = True
        lowerCAmelCase = NezhaModel(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , )
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    # Masked-LM head: logits over the vocabulary at each position.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->List[str]:
        lowerCAmelCase = NezhaForMaskedLM(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Next-sentence-prediction head: binary logits per example.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Optional[int]:
        lowerCAmelCase = NezhaForNextSentencePrediction(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    # Pretraining head: MLM logits plus sentence-relationship logits.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Dict:
        lowerCAmelCase = NezhaForPreTraining(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , next_sentence_label=__SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    # Question-answering head: start/end logits over the sequence.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Tuple:
        lowerCAmelCase = NezhaForQuestionAnswering(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Sequence-classification head: one logit vector per example.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
        lowerCAmelCase = self.num_labels
        lowerCAmelCase = NezhaForSequenceClassification(__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Token-classification head: per-token label logits.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->int:
        lowerCAmelCase = self.num_labels
        lowerCAmelCase = NezhaForTokenClassification(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Multiple-choice head: inputs are tiled across the choices dimension.
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Dict:
        lowerCAmelCase = self.num_choices
        lowerCAmelCase = NezhaForMultipleChoice(config=__SCREAMING_SNAKE_CASE )
        model.to(__SCREAMING_SNAKE_CASE )
        model.eval()
        lowerCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase = model(
            __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackage prepare_config_and_inputs() into (config, inputs_dict) form.
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) ,
        ) = config_and_inputs
        lowerCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowercase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Common test suite for all Nezha model heads (model/pipeline mixins).

    NOTE(review): as elsewhere in this file, assignment targets are mangled to
    ``lowerCAmelCase`` while later lines read the intended names — confirm
    against the original ``NezhaModelTest`` before relying on this text.
    """
    # All Nezha head classes under test (empty when torch is unavailable).
    UpperCAmelCase_ : List[str] = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Mapping from pipeline task name to the model class that serves it.
    UpperCAmelCase_ : str = (
        {
            """feature-extraction""": NezhaModel,
            """fill-mask""": NezhaForMaskedLM,
            """question-answering""": NezhaForQuestionAnswering,
            """text-classification""": NezhaForSequenceClassification,
            """token-classification""": NezhaForTokenClassification,
            """zero-shot""": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase_ : List[str] = True
    # Add dummy label tensors when return_labels is requested (pretraining needs
    # both token labels and a next-sentence label).
    def SCREAMING_SNAKE_CASE_ ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) ->List[str]:
        lowerCAmelCase = super()._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
        if return_labels:
            if model_class in get_values(__SCREAMING_SNAKE_CASE ):
                lowerCAmelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
                lowerCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
        return inputs_dict
    # Instantiate the model tester and the shared config tester.
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        lowerCAmelCase = NezhaModelTester(self )
        lowerCAmelCase = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=37 )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE_ ( self ) ->str:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        # This regression test was failing with PyTorch < 1.3
        (
            (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) , (
                lowerCAmelCase
            ) ,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        # Run the decoder path without an input mask.
        lowerCAmelCase = None
        self.model_tester.create_and_check_model_as_decoder(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->int:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE )
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE )
    # Load each published checkpoint and make sure it instantiates.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase = NezhaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
    # TorchScript round trip: trace, save, reload, and run on GPU.
    @slow
    @require_torch_gpu
    def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
        lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            lowerCAmelCase = True
            lowerCAmelCase = model_class(config=__SCREAMING_SNAKE_CASE )
            lowerCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
            lowerCAmelCase = torch.jit.trace(
                __SCREAMING_SNAKE_CASE , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , '''bert.pt''' ) )
                lowerCAmelCase = torch.jit.load(os.path.join(__SCREAMING_SNAKE_CASE , '''bert.pt''' ) , map_location=__SCREAMING_SNAKE_CASE )
                loaded(inputs_dict['''input_ids'''].to(__SCREAMING_SNAKE_CASE ) , inputs_dict['''attention_mask'''].to(__SCREAMING_SNAKE_CASE ) )
@require_torch
class lowercase_ ( unittest.TestCase ):
    """Integration tests against the published sijunhe/nezha-cn-base checkpoint.

    NOTE(review): assignment targets are mangled to ``lowerCAmelCase`` while
    later lines read ``model``/``output`` etc. — confirm against the original
    integration test.
    """
    # Base model: check output shape and a 3x3 logits slice to 1e-4.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        lowerCAmelCase = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' )
        lowerCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        lowerCAmelCase = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
        lowerCAmelCase = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
    # Masked-LM head: check vocabulary-sized output and a reference slice.
    @slow
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[Any]:
        lowerCAmelCase = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' )
        lowerCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        lowerCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            lowerCAmelCase = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )[0]
        lowerCAmelCase = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE )
        lowerCAmelCase = torch.tensor(
            [[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) )
| 312 | from timeit import timeit
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
    """Count the set bits of a non-negative int (Brian Kernighan's algorithm).

    Each ``n &= n - 1`` step clears the lowest set bit, so the loop runs once
    per set bit rather than once per bit position.

    :param snake_case__: non-negative integer to popcount
    :returns: number of 1-bits in the binary representation
    :raises ValueError: if the input is negative

    Fix over the previous revision: the body read ``number``/``result`` while
    the parameter was named ``snake_case__`` and the initialiser was bound to
    ``lowerCAmelCase``, so every call raised NameError.
    """
    if snake_case__ < 0:
        raise ValueError('''the value of input must not be negative''' )
    number = snake_case__
    result = 0
    while number:
        number &= number - 1  # clear the lowest set bit
        result += 1
    return result
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
    """Count the set bits of a non-negative int by testing each bit in turn.

    Checks the least-significant bit with ``% 2`` and shifts right until the
    value is exhausted (O(number of bits)).

    :param snake_case__: non-negative integer to popcount
    :returns: number of 1-bits in the binary representation
    :raises ValueError: if the input is negative

    Fix over the previous revision: the body read ``number``/``result`` while
    the parameter was named ``snake_case__`` and the initialiser was bound to
    ``lowerCAmelCase``, so every call raised NameError.
    """
    if snake_case__ < 0:
        raise ValueError('''the value of input must not be negative''' )
    number = snake_case__
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def SCREAMING_SNAKE_CASE_ ( ) -> None:
    """Benchmark the two popcount implementations above with ``timeit``.

    NOTE(review): the timeit statements and f-strings reference
    ``get_set_bits_count_using_modulo_operator`` /
    ``get_set_bits_count_using_brian_kernighans_algorithm`` and a ``timing``
    variable, none of which are defined here (the defs above were renamed to
    ``SCREAMING_SNAKE_CASE_`` and the result is bound to ``lowerCAmelCase``),
    and ``do_benchmark`` is called with the undefined ``snake_case__`` — the
    names look machine-mangled; confirm against the original module.
    """
    def do_benchmark(snake_case__ ) -> None:
        # ``setup`` imports this module so timeit can see the functions.
        lowerCAmelCase = '''import __main__ as z'''
        print(f"Benchmark when {number = }:" )
        print(f"{get_set_bits_count_using_modulo_operator(snake_case__ ) = }" )
        lowerCAmelCase = timeit('''z.get_set_bits_count_using_modulo_operator(25)''' , setup=snake_case__ )
        print(f"timeit() runs in {timing} seconds" )
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(snake_case__ ) = }" )
        lowerCAmelCase = timeit(
            '''z.get_set_bits_count_using_brian_kernighans_algorithm(25)''' , setup=snake_case__ , )
        print(f"timeit() runs in {timing} seconds" )
    # Exercise a few representative values, including zero.
    for number in (2_5, 3_7, 5_8, 0):
        do_benchmark(snake_case__ )
        print()
if __name__ == "__main__":
    # Run the module doctests, then the timing benchmark.
    import doctest
    doctest.testmod()
    # NOTE(review): ``benchmark`` is not defined in this file (the benchmark
    # function above was renamed to ``SCREAMING_SNAKE_CASE_``) — confirm
    # against the original script.
    benchmark()
| 312 | 1 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
# Emit INFO-level progress messages during the conversion run.
logging.set_verbosity_info()
# Later code in this script calls `logger.info(...)`; the degraded dump bound
# this to a junk name, leaving `logger` unbound.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE: the degraded dump bound the list to a junk name while every append below
# targets `rename_keys` — restored so the module imports cleanly.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
        ('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
    ]
)
def rename_key(state_dict, old, new):
    """Rename key ``old`` to ``new`` in ``state_dict``, in place.

    Fixes degraded code: the original line had three parameters with the same
    name (a SyntaxError) and dropped the store back into the dict. The name
    ``rename_key`` is restored from the conversion loop's call site.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with timm-style backbone keys renamed to the
    HF convention (``backbone.0.body`` -> ``backbone.conv_encoder.model``).

    Fixes degraded code: the accumulator and both stores were bound to a junk
    name, so the returned ``new_state_dict`` was unbound.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            # non-backbone keys are copied through unchanged
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split fused PyTorch MultiheadAttention in_proj weights/biases into the
    separate q/k/v projections used by the HF DETR-style attention, in place.

    Each fused tensor stacks [query; key; value] along dim 0, 256 rows each.
    Fixes degraded code: every store into ``state_dict`` had lost its target key;
    the q_proj/k_proj/v_proj destination names follow the DETR attention module
    (the same convention as the visible out_proj renames above).
    """
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize ``image`` so its longest side is 800 px (detection checkpoint)
    or 1000 px (structure checkpoint), preserving aspect ratio.

    Fixes degraded code: both parameters shared one name (a SyntaxError) and
    the local bindings were lost; the name ``resize`` is restored from the
    verification call ``normalize(resize(image, checkpoint_url))``.
    """
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a CHW float tensor and normalize it with the
    ImageNet mean/std used by the DETR-family models.

    Fixes degraded code: intermediate results were bound to a junk name and
    the normalize step was applied to the raw input, while ``return image``
    read an unbound name.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format,
    verify it on an example image, and optionally save / push it.

    Fixes degraded code: the three parameters shared one name (a SyntaxError)
    and nearly every local/config/state-dict store had lost its target; names
    restored from the surrounding call sites and the values visible below.

    Args:
        checkpoint_url: URL of the original .pth checkpoint ("detection" vs
            structure recognition is inferred from this URL).
        pytorch_dump_folder_path: where to save the converted model, or None.
        push_to_hub: whether to push the converted model to the HF hub.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        # table detection checkpoint: 15 queries, 2 classes
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        # table structure recognition checkpoint: 125 queries, 6 classes
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1_000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on an example image from the hub
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # Fixes degraded code: the parser and parsed args were bound to junk names,
    # so `parser.add_argument(...)` and `args.*` below read unbound names.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_url',
        default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
        type=str,
        choices=[
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
            'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
        ],
        help='URL of the Table Transformer checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 706 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
# Module-level logger (transformers logging wrapper).
logger = logging.get_logger(__name__)

# Config class name referenced by the generated docstrings of the MT5 classes below
# (transformers modeling-file convention); degraded dump bound both to junk names.
_CONFIG_FOR_DOC = 'T5Config'
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right: prepend ``decoder_start_token_id``
    and replace any -100 (ignore-index) values with ``pad_token_id``.

    Fixes degraded code: the three parameters shared one name (a SyntaxError)
    and intermediate results were bound to a junk name.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # -100 is the loss ignore index; it must not appear in decoder inputs
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
# NOTE(review): degraded identifiers — upstream this is one of the three Flax MT5
# wrappers (FlaxMT5Model / FlaxMT5EncoderModel / FlaxMT5ForConditionalGeneration)
# subclassing the matching Flax T5 class; which one cannot be determined from this
# chunk, and the base name `lowercase_` is unresolved here — confirm upstream.
class _UpperCAmelCase ( lowercase_ ):
    # presumably `model_type` and `config_class` upstream; as written, the second
    # assignment overwrites the first in the class namespace — TODO confirm
    UpperCamelCase = '''mt5'''
    UpperCamelCase = MTaConfig
# NOTE(review): degraded identifiers — upstream this is one of the three Flax MT5
# wrapper classes over the corresponding Flax T5 implementation; identity cannot be
# determined from this chunk — confirm upstream.
class _UpperCAmelCase ( lowercase_ ):
    # presumably `model_type` and `config_class`; the second assignment overwrites
    # the first as written — TODO confirm
    UpperCamelCase = '''mt5'''
    UpperCamelCase = MTaConfig
# NOTE(review): degraded identifiers — upstream this is one of the three Flax MT5
# wrapper classes over the corresponding Flax T5 implementation; identity cannot be
# determined from this chunk — confirm upstream.
class _UpperCAmelCase ( lowercase_ ):
    # presumably `model_type` and `config_class`; the second assignment overwrites
    # the first as written — TODO confirm
    UpperCamelCase = '''mt5'''
    UpperCamelCase = MTaConfig
| 524 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
# Module-level logger (transformers logging wrapper).
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint -> hosted config URL (transformers convention);
# the degraded dump bound both objects to junk names.
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''microsoft/layoutlmv3-base''': '''https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json''',
}
class LayoutLMv3Config(PretrainedConfig):
    """Configuration class for LayoutLMv3 models.

    Fixes degraded code: every __init__ parameter shared the name
    ``__magic_name__`` (a SyntaxError) and every ``self.x = x`` store had lost
    its target. Parameter names are restored from the defaults and the
    right-hand sides visible in the dump ("ad" is the dump's spelling of "2d").
    """

    model_type = '''layoutlmv3'''

    def __init__(
        self,
        vocab_size=5_0265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        # Text-transformer hyperparameters are forwarded to the base config.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Layout (2D position) and visual-patch specific hyperparameters.
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMv3OnnxConfig(OnnxConfig):
    """ONNX export configuration for LayoutLMv3.

    Fixes degraded code: the class attribute and several locals had lost their
    target names, and ``generate_dummy_inputs`` declared every parameter with
    the same name (a SyntaxError). Names restored from the right-hand sides and
    the standard transformers OnnxConfig interface.
    """

    torch_onnx_minimum_version = version.parse('''1.12''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Input names and dynamic-axis mappings; ordering differs per task."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
                    ('''bbox''', {0: '''batch''', 1: '''sequence'''}),
                    ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
                    ('''bbox''', {0: '''batch''', 1: '''sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
                    ('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        # absolute tolerance used when validating the exported model's outputs
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy (text, boxes, image) inputs for ONNX export tracing."""
        # OCR must be off so the dummy text/boxes we pass in are used as-is
        setattr(processor.image_processor, '''apply_ocr''', False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[''' '''.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
| 60 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
    """Tests for VisionTextDualEncoderProcessor (tokenizer + image-processor wrapper).

    NOTE(review): identifiers in this block look machine-degraded — every method is
    named `A_` (later defs shadow earlier ones in the class namespace, so only the
    last would be discoverable), and many lines bind `lowerCamelCase__` while later
    lines read names such as `self.tmpdirname`, `self.vocab_file`,
    `self.image_processor_file` and `vocab_tokens` that are never assigned here.
    Code kept byte-identical; confirm intended names against the upstream test file.
    """

    def A_ ( self : str ) -> Dict:
        # presumably setUp: create a temp dir containing a WordPiece vocab file and
        # an image-processor config — TODO confirm assignment targets
        lowerCamelCase__ : Optional[Any] = tempfile.mkdtemp()

        # fmt: off
        lowerCamelCase__ : Tuple = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        # fmt: on
        lowerCamelCase__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )

        lowerCamelCase__ : Any = {
            'do_resize': True,
            'size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.5, 0.5, 0.5],
            'image_std': [0.5, 0.5, 0.5],
        }
        lowerCamelCase__ : int = os.path.join(self.tmpdirname , UpperCAmelCase )
        with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
            json.dump(UpperCAmelCase , UpperCAmelCase )

    def A_ ( self : List[str] , **UpperCAmelCase : str ) -> int:
        # helper: build a slow BertTokenizer from the temp dir
        return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )

    def A_ ( self : Optional[int] , **UpperCAmelCase : List[str] ) -> List[str]:
        # helper: build a ViTImageProcessor from the temp dir
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )

    def A_ ( self : int ) -> Any:
        # presumably tearDown: remove the temp dir created above
        shutil.rmtree(self.tmpdirname )

    def A_ ( self : str ) -> str:
        # helper: one random 3x30x400 uint8 image wrapped as a PIL image
        lowerCamelCase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]

        lowerCamelCase__ : Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]

        return image_inputs

    def A_ ( self : Dict ) -> Dict:
        # save_pretrained / from_pretrained round-trip preserves both components
        lowerCamelCase__ : List[Any] = self.get_tokenizer()
        lowerCamelCase__ : Tuple = self.get_image_processor()
        lowerCamelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )

        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase__ : List[str] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCAmelCase )

    def A_ ( self : List[str] ) -> Tuple:
        # from_pretrained with extra kwargs overrides tokenizer/image-processor settings
        lowerCamelCase__ : List[Any] = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        lowerCamelCase__ : Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        lowerCamelCase__ : int = self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )

        lowerCamelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=UpperCAmelCase , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCAmelCase )

    def A_ ( self : Any ) -> Tuple:
        # image-only call matches the bare image processor's output
        lowerCamelCase__ : List[Any] = self.get_image_processor()
        lowerCamelCase__ : Any = self.get_tokenizer()

        lowerCamelCase__ : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )

        lowerCamelCase__ : Optional[int] = self.prepare_image_inputs()

        lowerCamelCase__ : int = image_processor(UpperCAmelCase , return_tensors='np' )
        lowerCamelCase__ : List[str] = processor(images=UpperCAmelCase , return_tensors='np' )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def A_ ( self : Tuple ) -> List[Any]:
        # text-only call matches the bare tokenizer's output
        lowerCamelCase__ : Dict = self.get_image_processor()
        lowerCamelCase__ : List[str] = self.get_tokenizer()

        lowerCamelCase__ : str = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )

        lowerCamelCase__ : Tuple = 'lower newer'

        lowerCamelCase__ : Tuple = processor(text=UpperCAmelCase )

        lowerCamelCase__ : Any = tokenizer(UpperCAmelCase )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def A_ ( self : Optional[int] ) -> Any:
        # text+image call yields the combined keys; no-input call raises
        lowerCamelCase__ : Optional[int] = self.get_image_processor()
        lowerCamelCase__ : Tuple = self.get_tokenizer()

        lowerCamelCase__ : Tuple = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )

        lowerCamelCase__ : str = 'lower newer'
        lowerCamelCase__ : Dict = self.prepare_image_inputs()

        lowerCamelCase__ : int = processor(text=UpperCAmelCase , images=UpperCAmelCase )

        self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )

        # test if it raises when no input is passed
        with self.assertRaises(UpperCAmelCase ):
            processor()

    def A_ ( self : Optional[Any] ) -> List[str]:
        # batch_decode is forwarded to the tokenizer
        lowerCamelCase__ : Dict = self.get_image_processor()
        lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()

        lowerCamelCase__ : List[str] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )

        lowerCamelCase__ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        lowerCamelCase__ : Dict = processor.batch_decode(UpperCAmelCase )
        lowerCamelCase__ : List[str] = tokenizer.batch_decode(UpperCAmelCase )

        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )

    def A_ ( self : Optional[int] ) -> List[str]:
        # model_input_names matches the keys the processor produces
        lowerCamelCase__ : Tuple = self.get_image_processor()
        lowerCamelCase__ : List[str] = self.get_tokenizer()

        lowerCamelCase__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )

        lowerCamelCase__ : Dict = 'lower newer'
        lowerCamelCase__ : List[Any] = self.prepare_image_inputs()

        lowerCamelCase__ : Union[str, Any] = processor(text=UpperCAmelCase , images=UpperCAmelCase )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 295 | 0 |
'''simple docstring'''
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
# (adds the repo root to sys.path so `tests.models.*` modules resolve below)
sys.path.append(""".""")
def get_module_path(test_file):
    """Turn a ``tests/models/.../test_modeling_*.py`` path into a dotted module path.

    Fixes degraded code: `components`, `test_fn` and the result were bound to a
    junk name while later lines read them unbound; the function name is
    restored from the `get_module_path(...)` call in the sibling helper.

    Raises:
        ValueError: if the path is not under tests/models, is not a .py file,
            or is not a test_modeling_* file.
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
            f"""{test_file} instead."""
        )
    test_fn = components[-1]
    if not test_fn.endswith('py'):
        raise ValueError(f"""`test_file` should be a python file. Got {test_fn} instead.""")
    if not test_fn.startswith('test_modeling_'):
        raise ValueError(
            f"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."""
        )
    components = components[:-1] + [test_fn.replace('.py', '')]
    test_module_path = '.'.join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module corresponding to ``test_file``.

    Fixes degraded code: both locals were bound to a junk name; the function
    name is restored from its call sites in the helpers below.
    """
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Return all ``*ModelTester`` classes defined in a test file, sorted by name.

    Fixes degraded code: the accumulator/module were bound to junk names and the
    sort key was ``lambda snake_case: x.__name__`` (param/body mismatch).
    """
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith('ModelTester'):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Return all model test classes in a test file (those with a non-empty
    ``all_model_classes``), sorted by name.

    Fixes degraded code: accumulator/module/attr locals were bound to junk
    names and the sort key lambda had a param/body mismatch.
    """
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, 'all_model_classes', [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Return the union of all model classes exercised by a test file, sorted by name.

    Fixes degraded code: junk-named locals and a param/body-mismatched sort lambda.
    """
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate ``test_class``, run its ``setUp`` if present, and return the
    class of its ``model_tester`` attribute (or None if absent/None).

    Fixes degraded code: the instance and result were bound to junk names,
    leaving ``test`` and ``model_tester`` unbound.
    """
    test = test_class()
    if hasattr(test, 'setUp'):
        test.setUp()

    model_tester = None
    if hasattr(test, 'model_tester'):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Return the test classes in ``test_file`` that exercise ``model_class``,
    sorted by name.

    Fixes degraded code: junk-named locals and a param/body-mismatched sort lambda.
    """
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Return the (deduplicated by position) model-tester classes attached to the
    test classes that exercise ``model_class``, sorted by name.

    Fixes degraded code: junk-named locals and a param/body-mismatched sort lambda.
    """
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Return a mapping {test class -> its model tester class} for a test file.

    Fixes degraded code: both locals were bound to junk names, leaving the
    returned name unbound.
    """
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Return a mapping {model class -> test classes exercising it} for a test file.

    Fixes degraded code: both locals were bound to junk names, leaving the
    returned name unbound.
    """
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Return a mapping {model class -> model tester classes} for a test file.

    Fixes degraded code: both locals were bound to junk names, leaving the
    returned name unbound.
    """
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert classes to their names so the structure is JSON-serializable.

    Strings pass through, classes become their ``__name__``, lists/tuples and
    dicts are converted element-wise, anything else is returned unchanged.
    Fixes degraded code: every ``isinstance`` second argument had been replaced
    by the value itself (``isinstance(o, o)``); the intended type checks are
    restored from the branch bodies (``o.__name__``, list/tuple recursion,
    ``o.items()``).
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
| 610 | '''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger (transformers logging wrapper).
logger = logging.get_logger(__name__)

# Tokenizer resource constants (transformers convention). The degraded dump
# bound all of these to junk names while the CamemBERT tokenizer class below
# reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — restored accordingly.
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """camembert-base""": 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = """▁"""
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = VOCAB_FILES_NAMES
A : Any = PRETRAINED_VOCAB_FILES_MAP
A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Any = ["input_ids", "attention_mask"]
def __init__( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : Optional[Any]="</s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Optional[Any]="<s>" , UpperCAmelCase_ : int="<unk>" , UpperCAmelCase_ : int="<pad>" , UpperCAmelCase_ : Tuple="<mask>" , UpperCAmelCase_ : int=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
a : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(UpperCAmelCase_))
a : Tuple = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
a : str = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
a : Optional[Any] = len(self.fairseq_tokens_to_ids)
a : List[str] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
a : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a : Optional[int] = [self.cls_token_id]
a : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
    """Return a mask with 1 for special tokens and 0 for sequence tokens.

    Fix: the mangled original declared duplicate parameter names
    (a SyntaxError); restored as `token_ids_0` / `token_ids_1` /
    `already_has_special_tokens`.
    """
    if already_has_special_tokens:
        # Input already contains special tokens: defer to the base class.
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def SCREAMING_SNAKE_CASE_(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
    """Return token-type ids (all zeros for this model, single or pair input).

    Fix: the mangled original declared duplicate parameter names
    (a SyntaxError) and the body read the lost names.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def SCREAMING_SNAKE_CASE_(self):
    """Total vocabulary size: fairseq special tokens plus the SentencePiece model."""
    fairseq_size = len(self.fairseq_tokens_to_ids)
    sp_size = len(self.sp_model)
    return fairseq_size + sp_size
def SCREAMING_SNAKE_CASE_(self):
    """Return the full token->id vocabulary, including user-added tokens.

    Fix: the mangled original used an undefined name as the comprehension key
    and assigned the dict to a throwaway local while `vocab.update(...)` read
    the lost name `vocab` (NameError).
    """
    vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
    vocab.update(self.added_tokens_encoder)
    return vocab
def SCREAMING_SNAKE_CASE_(self, UpperCAmelCase_: str):
    """Tokenize `UpperCAmelCase_` (the input text) into SentencePiece string pieces.

    Fix: the mangled original passed the input text itself as `out_type`;
    it must be the type `str` so pieces come back as strings, not ids.
    """
    return self.sp_model.encode(UpperCAmelCase_, out_type=str)
def SCREAMING_SNAKE_CASE_(self, UpperCAmelCase_: str):
    """Convert a token string to its id, honoring the fairseq offset.

    Fix: the mangled original tested the undefined name `token` instead of
    the parameter (NameError on every call).
    """
    if UpperCAmelCase_ in self.fairseq_tokens_to_ids:
        return self.fairseq_tokens_to_ids[UpperCAmelCase_]
    elif self.sp_model.PieceToId(UpperCAmelCase_) == 0:
        # Convert sentence piece unk token to fairseq unk token index
        return self.unk_token_id
    return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_(self, UpperCAmelCase_: int):
    """Convert an id to its token string, honoring the fairseq offset.

    Fix: the mangled original read the undefined name `index` instead of
    the parameter (NameError for any non-fairseq id).
    """
    if UpperCAmelCase_ in self.fairseq_ids_to_tokens:
        return self.fairseq_ids_to_tokens[UpperCAmelCase_]
    return self.sp_model.IdToPiece(UpperCAmelCase_ - self.fairseq_offset)
def SCREAMING_SNAKE_CASE_(self, UpperCAmelCase_):
    """Join a token sequence back into text, decoding sub-tokens around specials.

    Fix: the mangled original decoded the ENTIRE input list on every special
    token (and again at the end) instead of the accumulated
    `current_sub_tokens`, and its state variables were lost to a single
    throwaway local.
    """
    current_sub_tokens = []
    out_string = ''
    prev_is_special = False
    for token in UpperCAmelCase_:
        # make sure that special tokens are not decoded using sentencepiece model
        if token in self.all_special_tokens:
            if not prev_is_special:
                out_string += " "
            out_string += self.sp_model.decode(current_sub_tokens) + token
            prev_is_special = True
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
            prev_is_special = False
    out_string += self.sp_model.decode(current_sub_tokens)
    return out_string.strip()
def __getstate__(self):
    """Make the tokenizer picklable by dropping the native SentencePiece handle.

    Fix: the mangled original assigned both the copied dict and the None to a
    throwaway local, then returned the undefined name `state` (NameError).
    """
    state = self.__dict__.copy()
    # The SentencePieceProcessor is not picklable; __setstate__ reloads it
    # from `self.vocab_file`.
    state['sp_model'] = None
    return state
def __setstate__(self, UpperCAmelCase_):
    """Restore pickled state and rebuild the SentencePiece model from disk.

    Fix: the mangled original assigned the incoming dict to a throwaway local
    and never set `self.__dict__`, so unpickled instances had no state.
    """
    self.__dict__ = UpperCAmelCase_
    # for backward compatibility with pickles created before `sp_model_kwargs`
    if not hasattr(self, 'sp_model_kwargs'):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
    self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_(self, save_directory: str, filename_prefix: Optional[str] = None):
    """Copy (or re-serialize) the SentencePiece model file into `save_directory`.

    Returns a 1-tuple with the written vocab path, or None when the target is
    not a directory.

    Fix: the mangled original declared both parameters with the same name
    (a SyntaxError) while the body read the lost names.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        # Vocab file missing on disk (e.g. model loaded from a serialized
        # proto): dump the in-memory model instead.
        with open(out_vocab_file, 'wb') as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (out_vocab_file,)
| 610 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a__ : str = logging.get_logger(__name__)

# fairseq -> transformers parameter-name translation table; "*" is a
# layer-index wildcard filled in during conversion.
# NOTE(review): names are machine-mangled — all three module constants below
# are bound to `a__`, each clobbering the previous, while the functions that
# follow read `MAPPING` and `TOP_LEVEL_KEYS` (undefined here). Presumably
# these were `logger`, `MAPPING` and `TOP_LEVEL_KEYS`; confirm upstream.
a__ : Tuple = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
    '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
    '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
    '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
    '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
    '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
    '''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''adapter_layer''': '''encoder.layers.*.adapter_layer''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
    '''pooling_layer.linear''': '''projector''',
    '''pooling_layer.projection''': '''classifier''',
}
# Keys that live at the top level of the HF model (not under `wav2vec2.`).
a__ : Optional[int] = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
    '''projector''',
    '''classifier''',
]
def UpperCAmelCase__ (lowerCAmelCase_ ):
    """Parse a whitespace-separated label file into ``{first_word: line_number}``.

    `lowerCAmelCase_` is a path to a text file; blank lines are skipped and
    the 0-based line number of each remaining line becomes the value.

    Fix: the mangled original assigned every intermediate to one throwaway
    local and returned/opened undefined names (`result`, `snake_case_`),
    raising NameError on first use.
    """
    result = {}
    with open(lowerCAmelCase_, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = words[0]
                value = line_number
                result[key] = value
    return result
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Walk a dotted key path on the HF model and copy one fairseq tensor into it.

    NOTE(review): machine-mangled — all five parameters share one name (a
    SyntaxError) and every assignment targets the throwaway local
    `__SCREAMING_SNAKE_CASE` while later lines read the original names
    (`key`, `value`, `full_name`, `weight_type`, `hf_pointer`, ...).
    Presumably this was transformers' `set_recursively`; restore the
    parameter/local names from the upstream conversion script before use.
    """
    for attribute in key.split("." ):
        __SCREAMING_SNAKE_CASE = getattr(snake_case_ , snake_case_ )
    __SCREAMING_SNAKE_CASE = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(snake_case_ ):
            __SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split("." )[-1]]
            __SCREAMING_SNAKE_CASE = """param"""
    if weight_type is not None and weight_type != "param":
        __SCREAMING_SNAKE_CASE = getattr(snake_case_ , snake_case_ ).shape
    elif weight_type is not None and weight_type == "param":
        __SCREAMING_SNAKE_CASE = hf_pointer
        for attribute in hf_param_name.split("." ):
            __SCREAMING_SNAKE_CASE = getattr(snake_case_ , snake_case_ )
        __SCREAMING_SNAKE_CASE = shape_pointer.shape
        # let's reduce dimension
        __SCREAMING_SNAKE_CASE = value[0]
    else:
        __SCREAMING_SNAKE_CASE = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "weight_g":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "weight_v":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "bias":
        __SCREAMING_SNAKE_CASE = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            __SCREAMING_SNAKE_CASE = getattr(snake_case_ , snake_case_ )
        __SCREAMING_SNAKE_CASE = value
    else:
        __SCREAMING_SNAKE_CASE = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Record one converted weight under its flattened key (originally `rename_dict`).

    NOTE(review): machine-mangled — duplicate parameter names (a SyntaxError)
    and assignment targets collapsed to one local; the reads of `key`,
    `full_name`, `hf_param_name`, `weight_type`, `full_key`, `value` refer to
    the lost names. Restore from the upstream conversion script before use.
    """
    __SCREAMING_SNAKE_CASE = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(snake_case_ ):
            __SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split("." )[-1]]
            __SCREAMING_SNAKE_CASE = """param"""
    if weight_type is not None and weight_type != "param":
        __SCREAMING_SNAKE_CASE = """.""".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        __SCREAMING_SNAKE_CASE = """.""".join([key, hf_param_name] )
    else:
        __SCREAMING_SNAKE_CASE = key
    # `lm_head` keeps the full tensor; everything else drops the leading dim.
    __SCREAMING_SNAKE_CASE = value if """lm_head""" in full_key else value[0]
# Adapter-layer parameter-name translation (fairseq -> HF).
# NOTE(review): the functions above reference this table as `PARAM_MAPPING`,
# but the mangled binding below is `a__` — confirm the intended module-level
# name against the upstream script.
a__ : List[Any] = {
    '''W_a''': '''linear_1.weight''',
    '''W_b''': '''linear_2.weight''',
    '''b_a''': '''linear_1.bias''',
    '''b_b''': '''linear_2.bias''',
    '''ln_W''': '''norm.weight''',
    '''ln_b''': '''norm.bias''',
}
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ):
    """Try to map one fairseq weight onto the HF model; return whether it matched.

    NOTE(review): machine-mangled — duplicate parameter names (a SyntaxError)
    and collapsed locals (`name`, `mapped_key`, `is_used`, ... are read but
    never bound). Also note the final `return is_used` is duplicated: the
    second one is dead code left by the mangling.
    """
    __SCREAMING_SNAKE_CASE = False
    for key, mapped_key in MAPPING.items():
        __SCREAMING_SNAKE_CASE = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            __SCREAMING_SNAKE_CASE = True
            if "*" in mapped_key:
                __SCREAMING_SNAKE_CASE = name.split(snake_case_ )[0].split("." )[-2]
                __SCREAMING_SNAKE_CASE = mapped_key.replace("*" , snake_case_ )
            if "weight_g" in name:
                __SCREAMING_SNAKE_CASE = """weight_g"""
            elif "weight_v" in name:
                __SCREAMING_SNAKE_CASE = """weight_v"""
            elif "bias" in name:
                __SCREAMING_SNAKE_CASE = """bias"""
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                __SCREAMING_SNAKE_CASE = """weight"""
            else:
                __SCREAMING_SNAKE_CASE = None
            if hf_dict is not None:
                rename_dict(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
            else:
                set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
            return is_used
    return is_used
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Copy every fairseq weight into the HF model, warning about leftovers.

    NOTE(review): machine-mangled — duplicate parameter names (a SyntaxError)
    and collapsed locals; `fairseq_model`, `hf_model`, `fairseq_dict`,
    `unused_weights`, `is_used` are read but never bound here. Restore from
    the upstream `recursively_load_weights` before use.
    """
    __SCREAMING_SNAKE_CASE = []
    __SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
    __SCREAMING_SNAKE_CASE = hf_model.wavaveca.feature_extractor
    for name, value in fairseq_dict.items():
        __SCREAMING_SNAKE_CASE = False
        if "conv_layers" in name:
            # Feature-extractor conv weights use positional name parsing.
            load_conv_layer(
                snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
            __SCREAMING_SNAKE_CASE = True
        else:
            __SCREAMING_SNAKE_CASE = load_wavaveca_layer(snake_case_ , snake_case_ , snake_case_ )
        if not is_used:
            unused_weights.append(snake_case_ )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """Load one conv / layer-norm weight of the feature extractor by parsing its
    fairseq name (`conv_layers.<layer>.<type>...`).

    NOTE(review): machine-mangled — duplicate parameter names (a SyntaxError)
    and collapsed locals; `full_name`, `name`, `items`, `layer_id`, `type_id`,
    `value`, `feature_extractor`, `use_group_norm`, `unused_weights` are read
    but never bound here. Restore from the upstream `load_conv_layer`.
    """
    __SCREAMING_SNAKE_CASE = full_name.split("conv_layers." )[-1]
    __SCREAMING_SNAKE_CASE = name.split("." )
    __SCREAMING_SNAKE_CASE = int(items[0] )
    __SCREAMING_SNAKE_CASE = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            __SCREAMING_SNAKE_CASE = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            __SCREAMING_SNAKE_CASE = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            __SCREAMING_SNAKE_CASE = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            __SCREAMING_SNAKE_CASE = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(snake_case_ )
@torch.no_grad()
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False ):
    """Convert a fairseq wav2vec2 checkpoint (pretrained, CTC-finetuned, or
    sequence-classification) into a transformers checkpoint on disk.

    NOTE(review): machine-mangled — duplicate parameter names (a SyntaxError)
    and every local collapsed onto `__SCREAMING_SNAKE_CASE`; reads of
    `config_path`, `is_seq_class`, `is_finetuned`, `dict_path`,
    `checkpoint_path`, `config`, `target_dict`, `processor`, `hf_wavavec`,
    `model`, ... refer to the lost names. Restore from the upstream
    `convert_wav2vec2_checkpoint` before running.
    """
    if config_path is not None:
        __SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(snake_case_ )
    else:
        __SCREAMING_SNAKE_CASE = WavaVecaConfig()
    if is_seq_class:
        __SCREAMING_SNAKE_CASE = read_txt_into_dict(snake_case_ )
        __SCREAMING_SNAKE_CASE = idalabel
        __SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification(snake_case_ )
        __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
            feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
        feature_extractor.save_pretrained(snake_case_ )
    elif is_finetuned:
        if dict_path:
            __SCREAMING_SNAKE_CASE = Dictionary.load(snake_case_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __SCREAMING_SNAKE_CASE = target_dict.pad_index
            __SCREAMING_SNAKE_CASE = target_dict.bos_index
            __SCREAMING_SNAKE_CASE = target_dict.eos_index
            __SCREAMING_SNAKE_CASE = len(target_dict.symbols )
            __SCREAMING_SNAKE_CASE = os.path.join(snake_case_ , "vocab.json" )
            if not os.path.isdir(snake_case_ ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(snake_case_ ) )
                return
            os.makedirs(snake_case_ , exist_ok=snake_case_ )
            __SCREAMING_SNAKE_CASE = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __SCREAMING_SNAKE_CASE = 0
            __SCREAMING_SNAKE_CASE = 1
            with open(snake_case_ , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(snake_case_ , snake_case_ )
            __SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
                snake_case_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=snake_case_ , )
            __SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False
            __SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=snake_case_ , return_attention_mask=snake_case_ , )
            __SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=snake_case_ , tokenizer=snake_case_ )
            processor.save_pretrained(snake_case_ )
        __SCREAMING_SNAKE_CASE = WavaVecaForCTC(snake_case_ )
    else:
        __SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(snake_case_ )
    if is_finetuned or is_seq_class:
        __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    else:
        __SCREAMING_SNAKE_CASE = argparse.Namespace(task="audio_pretraining" )
        __SCREAMING_SNAKE_CASE = fairseq.tasks.setup_task(snake_case_ )
        __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=snake_case_ )
    __SCREAMING_SNAKE_CASE = model[0].eval()
    recursively_load_weights(snake_case_ , snake_case_ , not is_finetuned )
    hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): mangled — the parser and parsed args are bound to `a__`
    # while the code reads `parser` / `args` / `is_finetuned`, and the called
    # name `convert_wavaveca_checkpoint` is never defined above (the function
    # is mangled to `UpperCAmelCase__`). Restore names before running.
    a__ : Any = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    parser.add_argument(
        '''--is_seq_class''',
        action='''store_true''',
        help='''Whether the model to convert is a fine-tuned sequence classification model or not''',
    )
    a__ : Union[str, Any] = parser.parse_args()
    a__ : int = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 682 |
"""simple docstring"""
import math
import random
def A_(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid of `value`; with `deriv=True`, its derivative
    expressed in terms of an already-computed sigmoid output.

    Fix: the mangled original declared both parameters with the same name,
    which is a SyntaxError in Python.
    """
    if deriv:
        # derivative s'(x) = s(x) * (1 - s(x)), where `value` is s(x)
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
__A : int = 0.02
def A_(expected: int, number_propagations: int) -> float:
    """Train a single weight by gradient descent so the sigmoid output
    approaches `expected` percent; return the final output scaled to 0-100.

    Fix: the mangled original declared both parameters with the same name
    (a SyntaxError) and assigned every intermediate to one throwaway local
    while later lines read the lost names (`layer_a`, `layer_1_error`, ...).
    """
    # Random initial weight in [-99, 99]; depends on the global `random` state.
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (sigmoid derivative of the output)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): mangled — both input() results are bound to `__A`, while
    # the call below reads `expected` / `number_propagations` and calls
    # `forward_propagation`, none of which are defined under those names in
    # this file. Restore the original identifiers before running as a script.
    __A : Dict = int(input('''Expected value: '''))
    __A : List[Any] = int(input('''Number of propagations: '''))
    print(forward_propagation(expected, number_propagations))
| 499 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy import structure: submodule -> public names, served on first access by
# the _LazyModule proxy installed at the bottom of this file.
# Fix: the mangled original bound every entry to one variable (`lowercase`),
# clobbering the previous one, and assigned the _LazyModule to a variable
# instead of replacing the module in sys.modules — disabling lazy loading.
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

# The fast tokenizer needs the optional `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]

# Modeling code needs torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy proxy serves them.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy import structure: submodule -> public names, served on first access by
# the _LazyModule proxy installed at the bottom of this file.
# Fix: the first line of this section was corrupted with stray "| 700 |"
# tokens (a SyntaxError); additionally every entry was bound to one variable
# (`lowercase`) instead of _import_structure, and the _LazyModule was assigned
# to a variable instead of replacing the module in sys.modules.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy proxy serves them.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 584 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _a(unittest.TestCase):
    """Integration test: FlaxXLMRobertaModel output on a fixed sentence.

    Fix: the mangled original assigned every local to `A_` while passing the
    undefined name `UpperCAmelCase` around (NameError at runtime), and the
    final line carried stray "| 86 |" dataset tokens (a SyntaxError).
    """

    @slow
    def __A(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1E-3))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure: submodule -> public names, served on first access by
# the _LazyModule proxy installed at the bottom of this file.
# Fix: the mangled original bound every entry to one variable (`__a`),
# clobbering the previous one; the final line assigned the _LazyModule to a
# variable instead of replacing the module in sys.modules, and carried stray
# "| 86 | 1 |" dataset tokens (a SyntaxError).
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

# Slow tokenizer needs sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

# Fast tokenizer needs the `tokenizers` backend.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the lazy proxy serves them.
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
# NOTE(review): mangled — all five module constants below are bound to `__a`,
# each clobbering the previous; downstream code reads `FAIRSEQ_MODELS`,
# `SAMPLE_TEXT`, `mnli_rename_keys`, `logger` (undefined here). Confirm the
# intended names against the upstream BART conversion script.
__a : Optional[int] = ["""bart.large""", """bart.large.mnli""", """bart.large.cnn""", """bart_xsum/model.pt"""]
__a : Any = {"""bart.large""": BartModel, """bart.large.mnli""": BartForSequenceClassification}
# Conversion requires a recent-enough fairseq.
if version.parse(fairseq.__version__) < version.parse("""0.9.0"""):
    raise Exception("""requires fairseq >= 0.9.0""")

logging.set_verbosity_info()
__a : Any = logging.get_logger(__name__)

# Sanity-check sentence used to compare fairseq and HF tokenizations.
__a : Optional[int] = """ Hello world! cécé herlolip"""

# State-dict renames needed only for the MNLI classification head.
__a : List[Any] = [
    ("""model.classification_heads.mnli.dense.weight""", """classification_head.dense.weight"""),
    ("""model.classification_heads.mnli.dense.bias""", """classification_head.dense.bias"""),
    ("""model.classification_heads.mnli.out_proj.weight""", """classification_head.out_proj.weight"""),
    ("""model.classification_heads.mnli.out_proj.bias""", """classification_head.out_proj.bias"""),
]
def __magic_name__ ( lowercase_ ) -> Any:
'''simple docstring'''
UpperCamelCase = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
]
for k in ignore_keys:
state_dict.pop(lowercase_ , lowercase_ )
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase = dct.pop(lowercase_ )
UpperCamelCase = val
def __magic_name__ ( lowercase_ ) -> Optional[Any]:
    """Load an xsum-style checkpoint file and push its weights into a fairseq
    `bart.large.cnn` hub model (network access via torch.hub).

    NOTE(review): mangled — both locals are bound to `UpperCamelCase`, while
    the last two lines read the lost names `hub_interface` and `sd`
    (NameError). Restore from the upstream `load_xsum_checkpoint` before use.
    """
    UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )
    UpperCamelCase = torch.hub.load("pytorch/fairseq" , "bart.large.cnn" ).eval()
    hub_interface.model.load_state_dict(sd["model"] )
    return hub_interface
def __magic_name__ ( lowercase_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = emb.weight.shape
UpperCamelCase = nn.Linear(lowercase_ , lowercase_ , bias=lowercase_ )
UpperCamelCase = emb.weight.data
return lin_layer
@torch.no_grad()
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=None ) -> Tuple:
    """Convert a fairseq BART checkpoint into a transformers checkpoint on disk,
    verifying tokenization and model outputs agree before saving.

    NOTE(review): mangled — every local is bound to `UpperCamelCase` while
    later lines read the lost names (`checkpoint_path`, `bart`,
    `hf_checkpoint_name`, `config`, `tokens`, `tokensa`, `state_dict`,
    `model`, `fairseq_output`, `new_model_outputs`, ...). Restore from the
    upstream `convert_bart_checkpoint` before running.
    """
    if not os.path.exists(lowercase_ ):
        UpperCamelCase = torch.hub.load("pytorch/fairseq" , lowercase_ ).eval()
    else:
        UpperCamelCase = load_xsum_checkpoint(lowercase_ )
    bart.model.upgrade_state_dict(bart.model.state_dict() )
    if hf_checkpoint_name is None:
        UpperCamelCase = checkpoint_path.replace("." , "-" )
    UpperCamelCase = BartConfig.from_pretrained(lowercase_ )
    UpperCamelCase = bart.encode(lowercase_ ).unsqueeze(0 )
    UpperCamelCase = BartTokenizer.from_pretrained(lowercase_ ).encode(lowercase_ , return_tensors="pt" ).unsqueeze(0 )
    if not torch.eq(lowercase_ , lowercase_ ).all():
        raise ValueError(
            f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
    if checkpoint_path == "bart.large.mnli":
        UpperCamelCase = bart.state_dict()
        remove_ignore_keys_(lowercase_ )
        UpperCamelCase = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(lowercase_ , lowercase_ , lowercase_ )
        UpperCamelCase = BartForSequenceClassification(lowercase_ ).eval()
        model.load_state_dict(lowercase_ )
        UpperCamelCase = bart.predict("mnli" , lowercase_ , return_logits=lowercase_ )
        UpperCamelCase = model(lowercase_ )[0] # logits
    else: # no classification heads to worry about
        UpperCamelCase = bart.model.state_dict()
        remove_ignore_keys_(lowercase_ )
        UpperCamelCase = state_dict["decoder.embed_tokens.weight"]
        UpperCamelCase = bart.extract_features(lowercase_ )
        if hf_checkpoint_name == "facebook/bart-large":
            UpperCamelCase = BartModel(lowercase_ ).eval()
            model.load_state_dict(lowercase_ )
            UpperCamelCase = model(lowercase_ ).model[0]
        else:
            UpperCamelCase = BartForConditionalGeneration(lowercase_ ).eval() # an existing summarization ckpt
            model.model.load_state_dict(lowercase_ )
            if hasattr(lowercase_ , "lm_head" ):
                UpperCamelCase = make_linear_from_emb(model.model.shared )
            UpperCamelCase = model.model(lowercase_ )[0]
    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`" )
    Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
    model.save_pretrained(lowercase_ )
if __name__ == "__main__":
    # CLI entry point.
    # NOTE(review): mangled — parser/args/is_finetuned are all bound to `__a`
    # while the code reads the lost names, and `convert_bart_checkpoint` is
    # never defined above (the function is mangled to `__magic_name__`).
    # Restore names before running.
    __a : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
    )
    parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument(
        """--hf_config""", default=None, type=str, help="""Which huggingface architecture to use: bart-large-xsum"""
    )
    __a : Tuple = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 414 |
from ...processing_utils import ProcessorMixin
class __UpperCAmelCase ( snake_case__ ):
    """Whisper processor: bundles a feature extractor and a tokenizer behind one API.

    Fixes to the mangled original: both class attributes were bound to the
    same name `lowercase` (first one silently shadowed) — restored to the
    ProcessorMixin attribute names; four methods all shared the name
    `__lowerCAmelCase` (only the last survived) — restored to their upstream
    names; `__call__` declared `*args` and `**kwargs` with the same name
    (a SyntaxError) and assigned every local to a throwaway name.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of a bare call; swapped while inside a target context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Delegate decoder-prompt construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Route `audio` to the feature extractor and `text` to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Both given: attach tokenized text as labels on the audio features
            # (restored from upstream — the mangled source dropped the target).
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        """Delegate prompt-id construction to the tokenizer."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 414 | 1 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_A : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __snake_case(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for XGLM (slow SentencePiece and fast Rust backends).

    NOTE(review): the obfuscated original collapsed the base class, the mixin
    attributes, every method name, and most locals into duplicate
    identifiers; names were restored from the imports above, from the
    attributes the mixin reads (e.g. ``self.test_rust_tokenizer`` at the top
    of ``test_rust_and_python_full_tokenizers``), and from the upstream
    transformers test suite.
    """

    # Attribute names required by TokenizerTesterMixin.
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(_A, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<pad>` must map to id 1 and back (restored: the original bound
        both the token and the id to one variable)."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 10_08)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_08)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(_A, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        # Round-trip: out-of-vocab pieces come back as "<unk>".
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        # Downloads a real checkpoint; only used by the @slow tests below.
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(_A, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        text = "Hello World!"
        expected_encoding = [2, 3_12_27, 44_47, 35]

        self.assertListEqual(expected_encoding, self.big_tokenizer.encode(text))

    @slow
    def test_tokenization_base_hard_symbols(self):
        text = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        expected_encoding = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
        # fmt: on

        self.assertListEqual(expected_encoding, self.big_tokenizer.encode(text))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
            # One attention value per input id (row lengths: 98, 35, 14).
            "attention_mask": [[1] * 98, [1] * 35, [1] * 14],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
| 100 |
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
# WGS84 ellipsoid parameters in metres. Names restored: the obfuscated
# source bound all three values to one identifier, while the distance
# function below reads AXIS_A, AXIS_B and EQUATORIAL_RADIUS.
AXIS_A = 637_8137.0  # semi-major (equatorial) axis
AXIS_B = 635_6752.31_4245  # semi-minor (polar) axis
EQUATORIAL_RADIUS = 637_8137
def UpperCAmelCase_(lat_1: float, lon_1: float, lat_2: float, lon_2: float) -> float:
    """Return Lambert's ellipsoidal distance (metres) between two points
    given as (latitude, longitude) pairs in degrees.

    NOTE(review): the obfuscated original declared all four parameters with
    one name (a SyntaxError) and collapsed both parametric latitudes into a
    single variable while referencing the undefined name ``b_lata``; locals
    restored from the formula structure. Function name kept as-is since no
    in-file caller grounds a rename.
    """
    # Flattening of the WGS84 ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat_1 = atan((1 - flattening) * tan(radians(lat_1)))
    b_lat_2 = atan((1 - flattening) * tan(radians(lat_2)))

    # Central angle between the two points, via the haversine distance:
    # sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat_1, lon_1, lat_2, lon_2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat_1 + b_lat_2) / 2
    q_value = (b_lat_2 - b_lat_1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest

    doctest.testmod()
| 310 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
__A = logging.get_logger(__name__)
class a_(SequenceFeatureExtractor):
    """Speech feature extractor producing Kaldi-compliant log-mel filter-bank
    features with optional utterance-level cepstral mean/variance
    normalization (CMVN).

    NOTE(review): parameter, attribute and method names restored from the
    delegation targets visible in ``__call__``
    (``self._extract_fbank_features``, ``self.utterance_cmvn``,
    ``self.normalize``); the obfuscated original reused duplicate
    identifiers for every parameter (a SyntaxError), broke the numpy dtypes
    (``np.floataa``/``np.intaa``), and dropped the in-place padding
    assignment in ``utterance_cmvn``. The base class is taken from the
    ``SequenceFeatureExtractor`` import above.
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform) -> np.ndarray:
        """Compute log-mel filter-bank features for one mono waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x,
        input_length,
        normalize_means=True,
        normalize_vars=True,
        padding_value=0.0,
    ) -> np.ndarray:
        """Mean/variance-normalize using only the first `input_length` frames."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            # Re-apply the padding value to the padded frames after normalization.
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Apply utterance-level CMVN to every feature matrix in the batch."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize and pad one utterance or a batch of utterances."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
'''simple docstring'''
def factorial(num: int) -> int:
    """Return num! computed iteratively (1 for num <= 0).

    NOTE(review): name restored — all three functions in this snippet
    collided on one identifier while `solution` calls `factorial`.
    """
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer.

    NOTE(review): name restored — `solution` calls `split_and_add`.
    """
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits
def solution(num: int = 100) -> int:
    """Return the sum of the digits in num! (Project Euler problem 20).

    Self-contained so the result does not depend on sibling helpers: the
    factorial is computed inline and its digits summed directly.
    """
    fact = 1
    for i in range(2, num + 1):
        fact *= i
    return sum(int(digit) for digit in str(fact))
if __name__ == "__main__":
    # Read an integer from stdin and print the digit sum of its factorial.
    # (Removed dataset-table residue that was fused onto this line.)
    print(solution(int(input("Enter the Number: ").strip())))
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch) -> float:
    """First-order ordinary-least-squares prediction of user count from
    date and match count.

    NOTE(review): parameter and function names restored — the obfuscated
    original declared five identically-named parameters (a SyntaxError);
    the caller in the __main__ block grounds the function name.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    # Normal-equation solution: beta = (X^T X)^-1 X^T y
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user, train_match, test_match) -> float:
    """One-step SARIMAX forecast of the user count using the match count as
    exogenous data.

    NOTE(review): names restored from the caller in the __main__ block; the
    obfuscated original had duplicate parameter names (a SyntaxError). The
    second argument of ``predict`` is assumed to be ``len(test_match)``
    (the obfuscated source collapsed the name) — TODO confirm against the
    upstream script.
    """
    # Fixed (p, d, q) and seasonal (P, D, Q, s) orders.
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train, x_test, train_user) -> float:
    """RBF-kernel support-vector-regression prediction for the test row.

    NOTE(review): names restored from the caller in the __main__ block; the
    obfuscated original had duplicate parameter names (a SyntaxError).
    """
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user) -> float:
    """Return a lower safety limit derived from the interquartile range.

    NOTE: sorts `train_user` in place.
    NOTE(review): locals restored — the obfuscated original bound both
    quartiles to one name, making the IQR identically zero.
    """
    train_user.sort()
    q25 = np.percentile(train_user, 25)
    q75 = np.percentile(train_user, 75)
    iqr = q75 - q25
    low_lim = q25 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote, actual_result) -> bool:
    """Vote on whether the forecasts agree with the actual result.

    A forecast above the actual value counts as unsafe; one within 0.1 (in
    absolute magnitude) counts as safe. Returns True when safe votes win.

    NOTE(review): locals restored — the obfuscated original bound `safe`
    and `not_safe` to one name and referenced the undefined `not_safe`.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])

    # Scale every row to unit norm before modelling.
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    # Fixed: the original print was missing the f-string prefix, so the
    # placeholder was emitted literally.
    print(f"Today's data is {not_str}safe.")
| 367 |
import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    """Return (largest eigenvalue, corresponding eigenvector) of
    `input_matrix` via power iteration, starting from `vector`.

    For complex input the matrix must be Hermitian. Iterates until the
    relative change in the Rayleigh quotient drops below `error_tol` or
    `max_iterations` is reached.

    NOTE(review): parameter and local names restored — the obfuscated
    original declared four identically-named parameters (a SyntaxError);
    the caller `test_power_iteration` grounds the function name.
    """
    # Square matrix with matching vector dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed
    # max_iterations or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find Rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    """Compare power_iteration against np.linalg.eigh on a real symmetric
    and a complex Hermitian matrix.

    NOTE(review): locals restored — the obfuscated original collapsed them
    into one name — and the broken dtype ``np.complexaaa`` replaced with
    ``np.complex128``.
    """
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    # Build a Hermitian complex matrix from the real one.
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation: eigh is for symmetric/Hermitian matrices
        # and returns eigenvalues in ascending order.
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column is the eigenvector of the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Eigenvectors are unique only up to sign; compare magnitudes.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
    # Run the module doctests, then the self-test above.
    import doctest

    doctest.testmod()
    test_power_iteration()
| 367 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module logger for the image processor below.
snake_case__ : List[Any] = logging.get_logger(__name__)

# PIL is an optional dependency; import it only when vision support is installed.
if is_vision_available():
    import PIL
class SCREAMING_SNAKE_CASE_(BaseImageProcessor):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale and normalization.

    NOTE(review): parameter and method names restored from the delegation
    targets visible in ``preprocess`` (``self.resize``, ``self.center_crop``,
    ``self.rescale``, ``self.normalize``); the obfuscated original reused
    one identifier for every parameter (a SyntaxError). The base class is
    taken from the ``BaseImageProcessor`` import above.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Standardize with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the configured transform pipeline over one image or a batch."""
        # Per-call arguments override the instance configuration.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 171 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger for the config classes below.
snake_case__ : Tuple = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config file.
snake_case__ : Optional[int] = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = "detr"
_a = ["past_key_values"]
_a = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : int , __a : Dict=True , __a : Union[str, Any]=None , __a : Union[str, Any]=3 , __a : Dict=100 , __a : str=6 , __a : List[str]=2_048 , __a : Any=8 , __a : List[str]=6 , __a : List[str]=2_048 , __a : str=8 , __a : Tuple=0.0 , __a : Dict=0.0 , __a : Optional[int]=True , __a : Union[str, Any]="relu" , __a : Optional[int]=256 , __a : Tuple=0.1 , __a : List[str]=0.0 , __a : Tuple=0.0 , __a : Tuple=0.02 , __a : Optional[Any]=1.0 , __a : List[str]=False , __a : Optional[int]="sine" , __a : Optional[Any]="resnet50" , __a : Optional[int]=True , __a : Dict=False , __a : Union[str, Any]=1 , __a : Optional[Any]=5 , __a : List[Any]=2 , __a : Any=1 , __a : int=1 , __a : List[str]=5 , __a : int=2 , __a : Any=0.1 , **__a : List[Any] , ) ->str:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCamelCase_ : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__a , __a ):
lowerCamelCase_ : List[Any] = backbone_config.get("""model_type""" )
lowerCamelCase_ : Optional[int] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase_ : List[str] = config_class.from_dict(__a )
# set timm attributes to None
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = None, None, None
lowerCamelCase_ : Dict = use_timm_backbone
lowerCamelCase_ : Optional[Any] = backbone_config
lowerCamelCase_ : List[Any] = num_channels
lowerCamelCase_ : int = num_queries
lowerCamelCase_ : int = d_model
lowerCamelCase_ : Union[str, Any] = encoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = encoder_layers
lowerCamelCase_ : List[str] = encoder_attention_heads
lowerCamelCase_ : Any = decoder_ffn_dim
lowerCamelCase_ : Union[str, Any] = decoder_layers
lowerCamelCase_ : List[Any] = decoder_attention_heads
lowerCamelCase_ : Optional[Any] = dropout
lowerCamelCase_ : List[str] = attention_dropout
lowerCamelCase_ : List[Any] = activation_dropout
lowerCamelCase_ : Union[str, Any] = activation_function
lowerCamelCase_ : int = init_std
lowerCamelCase_ : Optional[Any] = init_xavier_std
lowerCamelCase_ : Any = encoder_layerdrop
lowerCamelCase_ : List[Any] = decoder_layerdrop
lowerCamelCase_ : Union[str, Any] = encoder_layers
lowerCamelCase_ : Any = auxiliary_loss
lowerCamelCase_ : Tuple = position_embedding_type
lowerCamelCase_ : Optional[int] = backbone
lowerCamelCase_ : Union[str, Any] = use_pretrained_backbone
lowerCamelCase_ : int = dilation
# Hungarian matcher
lowerCamelCase_ : str = class_cost
lowerCamelCase_ : Union[str, Any] = bbox_cost
lowerCamelCase_ : Tuple = giou_cost
# Loss coefficients
lowerCamelCase_ : Optional[int] = mask_loss_coefficient
lowerCamelCase_ : int = dice_loss_coefficient
lowerCamelCase_ : str = bbox_loss_coefficient
lowerCamelCase_ : List[str] = giou_loss_coefficient
lowerCamelCase_ : int = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a )
@property
def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
return self.encoder_attention_heads
@property
def _lowerCAmelCase ( self : List[str] ) ->int:
    """Expose the model embedding dimension (``d_model``) under the generic accessor name."""
    hidden_size = self.d_model
    return hidden_size
@classmethod
def _lowerCAmelCase ( cls : Tuple , __a : PretrainedConfig , **__a : Dict ) ->Optional[int]:
    # Alternate constructor: instantiate this config from a backbone config.
    # NOTE(review): the positional parameter and the **kwargs parameter are
    # both named ``__a`` here, which is a SyntaxError — the original distinct
    # names (e.g. ``backbone_config`` / ``kwargs``) were lost in a mechanical
    # rename; restore them from the upstream implementation.
    return cls(backbone_config=__a , **__a )
def _lowerCAmelCase ( self : List[Any] ) ->Dict[str, any]:
    """Serialize this configuration to a plain dict (PretrainedConfig.to_dict style).

    NOTE(review): the assignments below target the mangled placeholder name
    ``lowerCamelCase_`` while later lines read ``output`` — the original
    variable names were lost in a mechanical rewrite; as written this raises
    NameError. Confirm against the upstream config implementation.
    """
    lowerCamelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )  # copy of the config state
    if output["backbone_config"] is not None:
        # the nested backbone config must itself be serialized to a dict
        lowerCamelCase_ : List[str] = self.backbone_config.to_dict()
    lowerCamelCase_ : Union[str, Any] = self.__class__.model_type
    return output
class SCREAMING_SNAKE_CASE_ (a__ ):
    """ONNX export configuration: declares the dynamic input axes, the numeric
    tolerance used when validating exported outputs, and the minimum opset."""

    _a = version.parse("1.11" )

    @property
    def _lowerCAmelCase ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
        """Dynamic-axis specification for the two model inputs."""
        dynamic_axes = [
            ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ("""pixel_mask""", {0: """batch"""}),
        ]
        return OrderedDict(dynamic_axes)

    @property
    def _lowerCAmelCase ( self : Optional[Any] ) ->float:
        """Absolute tolerance for ONNX output validation."""
        return 1e-5

    @property
    def _lowerCAmelCase ( self : Union[str, Any] ) ->int:
        """Minimum ONNX opset version required by this model."""
        return 12
| 171 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Any = logging.get_logger(__name__)
def A__ ( lowerCamelCase ) -> Any:
    """Load a MobileViTV2 training YAML file and return it flattened into an
    ``argparse.Namespace`` whose attribute names are dot-joined key paths.

    NOTE(review): assignments target the mangled placeholder ``UpperCamelCase_``
    while later lines read the intended names (``items``, ``new_key``,
    ``flat_cfg``, ``config``), and the nested helper declares three parameters
    all named ``lowerCamelCase`` (a SyntaxError) — variable names were lost in
    a mechanical rewrite; restore them from the upstream conversion script.
    """
    print("""Loading config file...""" )

    def flatten_yaml_as_dict(lowerCamelCase , lowerCamelCase="" , lowerCamelCase="." ):
        # Recursively flatten nested mappings: {"a": {"b": 1}} -> {"a.b": 1}.
        UpperCamelCase_: int = []
        for k, v in d.items():
            UpperCamelCase_: int = parent_key + sep + k if parent_key else k
            if isinstance(lowerCamelCase , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(lowerCamelCase , lowerCamelCase , sep=lowerCamelCase ).items() )
            else:
                items.append((new_key, v) )
        return dict(lowerCamelCase )

    UpperCamelCase_: Dict = argparse.Namespace()
    with open(lowerCamelCase , """r""" ) as yaml_file:
        try:
            UpperCamelCase_: Dict = yaml.load(lowerCamelCase , Loader=yaml.FullLoader )
            UpperCamelCase_: str = flatten_yaml_as_dict(lowerCamelCase )
            # copy every flattened key/value onto the namespace as attributes
            for k, v in flat_cfg.items():
                setattr(lowerCamelCase , lowerCamelCase , lowerCamelCase )
        except yaml.YAMLError as exc:
            # best-effort: log and fall through, returning the (empty) namespace
            logger.error("""Error while loading config file: {}. Error message: {}""".format(lowerCamelCase , str(lowerCamelCase ) ) )
    return config
def A__ ( lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
    """Build a HF ``MobileViTV2Config`` for *task_name* from the original YAML.

    The task name encodes dataset, label count and input resolution; segmentation
    tasks additionally pull DeepLabV3/ASPP parameters from the original config.

    NOTE(review): assignments target the mangled placeholder ``UpperCamelCase_``
    while later lines read the intended names (``config``, ``orig_config``,
    ``idalabel`` ...) — variable names were lost in a mechanical rewrite;
    restore them from the upstream conversion script before running.
    """
    UpperCamelCase_: int = MobileViTVaConfig()
    UpperCamelCase_: List[Any] = False
    # dataset: choose label count, image size and id2label file per task
    if task_name.startswith("""imagenet1k_""" ):
        UpperCamelCase_: Optional[int] = 10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            UpperCamelCase_: Tuple = 3_84
        else:
            UpperCamelCase_: str = 2_56
        UpperCamelCase_: List[Any] = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_""" ):
        UpperCamelCase_: Optional[Any] = 2_10_00
        if int(task_name.strip().split("""_""" )[-1] ) == 3_84:
            UpperCamelCase_: int = 3_84
        else:
            UpperCamelCase_: Optional[Any] = 2_56
        UpperCamelCase_: List[str] = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_""" ):
        UpperCamelCase_: List[Any] = 1_51
        UpperCamelCase_: Union[str, Any] = 5_12
        UpperCamelCase_: List[str] = """ade20k-id2label.json"""
        UpperCamelCase_: str = True
    elif task_name.startswith("""voc_""" ):
        UpperCamelCase_: Optional[Any] = 21
        UpperCamelCase_: List[Any] = 5_12
        UpperCamelCase_: Dict = """pascal-voc-id2label.json"""
        UpperCamelCase_: str = True
    # orig_config: parsed original training YAML (flattened namespace)
    UpperCamelCase_: List[str] = load_orig_config_file(lowerCamelCase )
    assert getattr(lowerCamelCase , """model.classification.name""" , -1 ) == "mobilevit_v2", "Invalid model"
    UpperCamelCase_: Optional[Any] = getattr(lowerCamelCase , """model.classification.mitv2.width_multiplier""" , 1.0 )
    assert (
        getattr(lowerCamelCase , """model.classification.mitv2.attn_norm_layer""" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    UpperCamelCase_: Union[str, Any] = getattr(lowerCamelCase , """model.classification.activation.name""" , """swish""" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        # segmentation heads need the output stride and (for deeplabv3) ASPP params
        UpperCamelCase_: int = getattr(lowerCamelCase , """model.segmentation.output_stride""" , 16 )
        if "_deeplabv3" in task_name:
            UpperCamelCase_: Any = getattr(lowerCamelCase , """model.segmentation.deeplabv3.aspp_rates""" , [12, 24, 36] )
            UpperCamelCase_: Optional[Any] = getattr(lowerCamelCase , """model.segmentation.deeplabv3.aspp_out_channels""" , 5_12 )
            UpperCamelCase_: Any = getattr(lowerCamelCase , """model.segmentation.deeplabv3.aspp_dropout""" , 0.1 )
    # id2label: fetch the label mapping from the HF hub dataset repo
    UpperCamelCase_: Union[str, Any] = """huggingface/label-files"""
    UpperCamelCase_: Tuple = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
    UpperCamelCase_: List[Any] = {int(lowerCamelCase ): v for k, v in idalabel.items()}
    UpperCamelCase_: Optional[Any] = idalabel
    UpperCamelCase_: List[str] = {v: k for k, v in idalabel.items()}
    return config
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Dict:
    """Rename a state-dict entry in place: pop the old key, store under the new.

    NOTE(review): the three parameters share one mangled name (a SyntaxError)
    and the second assignment targets a placeholder instead of writing
    ``dct[new] = val`` — names were lost in a mechanical rewrite.
    """
    UpperCamelCase_: Any = dct.pop(lowerCamelCase )
    UpperCamelCase_: Any = val
def A__ ( lowerCamelCase , lowerCamelCase=False ) -> Dict:
    """Produce (old_key, new_key) pairs mapping original MobileViTV2 checkpoint
    keys onto HF ``MobileViTV2`` module names.

    NOTE(review): assignments target the mangled placeholder ``UpperCamelCase_``
    while later lines read ``k_new``/``rename_keys``/``model_prefix`` —
    variable names were lost in a mechanical rewrite; restore from upstream.
    """
    if base_model:
        UpperCamelCase_: str = """"""
    else:
        # keys of the full model live under the ``mobilevitv2.`` prefix
        UpperCamelCase_: Optional[int] = """mobilevitv2."""
    UpperCamelCase_: Tuple = []
    for k in state_dict.keys():
        # strip the original ``encoder.`` prefix, then apply substring renames
        if k[:8] == "encoder.":
            UpperCamelCase_: int = k[8:]
        else:
            UpperCamelCase_: int = k
        if ".block." in k:
            UpperCamelCase_: Optional[int] = k_new.replace(""".block.""" , """.""" )
        if ".conv." in k:
            UpperCamelCase_: Tuple = k_new.replace(""".conv.""" , """.convolution.""" )
        if ".norm." in k:
            UpperCamelCase_: Optional[int] = k_new.replace(""".norm.""" , """.normalization.""" )
        if "conv_1." in k:
            UpperCamelCase_: List[Any] = k_new.replace("""conv_1.""" , F'''{model_prefix}conv_stem.''' )
        for i in [1, 2]:
            if F'''layer_{i}.''' in k:
                UpperCamelCase_: List[str] = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' )
        if ".exp_1x1." in k:
            UpperCamelCase_: Dict = k_new.replace(""".exp_1x1.""" , """.expand_1x1.""" )
        if ".red_1x1." in k:
            UpperCamelCase_: int = k_new.replace(""".red_1x1.""" , """.reduce_1x1.""" )
        for i in [3, 4, 5]:
            if F'''layer_{i}.0.''' in k:
                UpperCamelCase_: Union[str, Any] = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
            if F'''layer_{i}.1.local_rep.0.''' in k:
                UpperCamelCase_: str = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
            if F'''layer_{i}.1.local_rep.1.''' in k:
                UpperCamelCase_: str = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
        for i in [3, 4, 5]:
            # each stage has a different number of transformer sub-layers
            if i == 3:
                UpperCamelCase_: Optional[int] = [0, 1]
            elif i == 4:
                UpperCamelCase_: Optional[int] = [0, 1, 2, 3]
            elif i == 5:
                UpperCamelCase_: Any = [0, 1, 2]
            for j in j_in:
                if F'''layer_{i}.1.global_rep.{j}.''' in k:
                    UpperCamelCase_: List[Any] = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
                if F'''layer_{i}.1.global_rep.{j+1}.''' in k:
                    UpperCamelCase_: List[Any] = k_new.replace(
                        F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
            if F'''layer_{i}.1.conv_proj.''' in k:
                UpperCamelCase_: Tuple = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
        if "pre_norm_attn.0." in k:
            UpperCamelCase_: Optional[Any] = k_new.replace("""pre_norm_attn.0.""" , """layernorm_before.""" )
        if "pre_norm_attn.1." in k:
            UpperCamelCase_: List[str] = k_new.replace("""pre_norm_attn.1.""" , """attention.""" )
        if "pre_norm_ffn.0." in k:
            UpperCamelCase_: Optional[Any] = k_new.replace("""pre_norm_ffn.0.""" , """layernorm_after.""" )
        if "pre_norm_ffn.1." in k:
            UpperCamelCase_: List[Any] = k_new.replace("""pre_norm_ffn.1.""" , """ffn.conv1.""" )
        if "pre_norm_ffn.3." in k:
            UpperCamelCase_: Tuple = k_new.replace("""pre_norm_ffn.3.""" , """ffn.conv2.""" )
        if "classifier.1." in k:
            UpperCamelCase_: List[Any] = k_new.replace("""classifier.1.""" , """classifier.""" )
        if "seg_head." in k:
            UpperCamelCase_: Any = k_new.replace("""seg_head.""" , """segmentation_head.""" )
        if ".aspp_layer." in k:
            UpperCamelCase_: str = k_new.replace(""".aspp_layer.""" , """.""" )
        if ".aspp_pool." in k:
            UpperCamelCase_: List[Any] = k_new.replace(""".aspp_pool.""" , """.""" )
        rename_keys.append((k, k_new) )
    return rename_keys
def A__ ( lowerCamelCase ) -> int:
    """Drop auxiliary-segmentation-head weights (``seg_head.aux_head.*``) from
    the state dict in place; they have no counterpart in the HF model.

    NOTE(review): the first assignment targets the mangled placeholder
    ``UpperCamelCase_`` but the loops read ``keys_to_ignore`` — the original
    variable name was lost in a mechanical rewrite.
    """
    UpperCamelCase_: Tuple = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head.""" ):
            keys_to_ignore.append(lowerCamelCase )
    for k in keys_to_ignore:
        state_dict.pop(lowerCamelCase , lowerCamelCase )
def A__ ( ) -> List[Any]:
    """Download the standard COCO test image used to sanity-check model outputs.

    NOTE(review): assignments target the mangled placeholder ``UpperCamelCase_``
    while the return reads ``im`` — variable names were lost in a mechanical
    rewrite.
    """
    UpperCamelCase_: List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    UpperCamelCase_: List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase ).raw )
    return im
@torch.no_grad()
def A__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Tuple:
    """Convert an original MobileViTV2 checkpoint to the HF format, verify its
    outputs on a sample image, and save the model plus image processor.

    NOTE(review): assignments target the mangled placeholder ``UpperCamelCase_``
    while later lines read the intended names (``model``, ``checkpoint``,
    ``logits`` ...) — variable names were lost in a mechanical rewrite;
    restore them from the upstream conversion script before running.
    """
    UpperCamelCase_: Tuple = get_mobilevitva_config(lowerCamelCase , lowerCamelCase )
    # load original state_dict
    UpperCamelCase_: Optional[int] = torch.load(lowerCamelCase , map_location="""cpu""" )
    # load huggingface model (segmentation vs classification head per task)
    if task_name.startswith("""ade20k_""" ) or task_name.startswith("""voc_""" ):
        UpperCamelCase_: Tuple = MobileViTVaForSemanticSegmentation(lowerCamelCase ).eval()
        UpperCamelCase_: int = False
    else:
        UpperCamelCase_: Tuple = MobileViTVaForImageClassification(lowerCamelCase ).eval()
        UpperCamelCase_: Optional[Any] = False
    # remove and rename some keys of load the original model
    UpperCamelCase_: List[Any] = checkpoint
    remove_unused_keys(lowerCamelCase )
    UpperCamelCase_: Tuple = create_rename_keys(lowerCamelCase , base_model=lowerCamelCase )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    # load modified state_dict
    model.load_state_dict(lowerCamelCase )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    UpperCamelCase_: Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    UpperCamelCase_: int = image_processor(images=prepare_img() , return_tensors="""pt""" )
    UpperCamelCase_: Dict = model(**lowerCamelCase )
    # verify classification model
    if task_name.startswith("""imagenet""" ):
        UpperCamelCase_: Union[str, Any] = outputs.logits
        UpperCamelCase_: Optional[int] = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
    if task_name.startswith("""imagenet1k_256""" ) and config.width_multiplier == 1.0:
        # expected_logits for base variant
        UpperCamelCase_: Optional[Any] = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1] )
        assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4 )
    Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
    print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(lowerCamelCase )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(lowerCamelCase )
if __name__ == "__main__":
    # CLI entry point for the MobileViTV2 -> HF conversion script.
    # NOTE(review): the parser and args are assigned to the mangled placeholder
    # ``lowerCamelCase_`` but subsequently read as ``parser``/``args`` —
    # original names were lost in a mechanical rewrite.
    lowerCamelCase_ : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--task""",
        default="""imagenet1k_256""",
        type=str,
        help=(
            """Name of the task for which the MobileViTV2 model you'd like to convert is trained on . """
            """
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            """imagenet1k_256""",
            """imagenet1k_384""",
            """imagenet21k_to_1k_256""",
            """imagenet21k_to_1k_384""",
            """ade20k_deeplabv3""",
            """voc_deeplabv3""",
        ],
    )
    parser.add_argument(
        """--orig_checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
    )
    parser.add_argument("""--orig_config_path""", required=True, type=str, help="""Path to the original config file.""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
    )
    lowerCamelCase_ : Optional[Any] = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 548 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( _A ):
    """Unit tests for ``CMStochasticIterativeScheduler`` (consistency-model sampler).

    NOTE(review): method bodies assign to the mangled placeholder
    ``UpperCamelCase_`` but read the intended names (``config``, ``scheduler``,
    ``sample`` ...); the original variable names were lost in a mechanical
    rewrite — restore them from the upstream diffusers test file before running.
    """
    __UpperCamelCase : Tuple = (CMStochasticIterativeScheduler,)  # scheduler classes exercised by the shared tests
    __UpperCamelCase : List[str] = 10  # default number of inference steps
    def lowerCAmelCase__ ( self : Optional[int] , **snake_case_ : List[str] ):
        # Base scheduler config; keyword arguments override the defaults.
        UpperCamelCase_: Optional[int] = {
            """num_train_timesteps""": 201,
            """sigma_min""": 0.002,
            """sigma_max""": 80.0,
        }
        config.update(**snake_case_ )
        return config
    def lowerCAmelCase__ ( self : Tuple ):
        # Two consecutive steps must preserve the sample shape.
        UpperCamelCase_: Tuple = 10
        UpperCamelCase_: Tuple = self.get_scheduler_config()
        UpperCamelCase_: List[str] = self.scheduler_classes[0](**snake_case_ )
        scheduler.set_timesteps(snake_case_ )
        UpperCamelCase_: int = scheduler.timesteps[0]
        UpperCamelCase_: str = scheduler.timesteps[1]
        UpperCamelCase_: Optional[int] = self.dummy_sample
        UpperCamelCase_: int = 0.1 * sample
        UpperCamelCase_: Optional[Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
        UpperCamelCase_: Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        # Config sweep over the number of training timesteps.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=snake_case_ )
    def lowerCAmelCase__ ( self : Optional[Any] ):
        # Config sweep over clip_denoised.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=snake_case_ )
    def lowerCAmelCase__ ( self : Any ):
        # Full denoising loop with the default schedule; checks summary stats.
        UpperCamelCase_: Any = self.scheduler_classes[0]
        UpperCamelCase_: List[str] = self.get_scheduler_config()
        UpperCamelCase_: Tuple = scheduler_class(**snake_case_ )
        UpperCamelCase_: Optional[int] = 1
        scheduler.set_timesteps(snake_case_ )
        UpperCamelCase_: Any = scheduler.timesteps
        UpperCamelCase_: int = torch.manual_seed(0 )
        UpperCamelCase_: Any = self.dummy_model()
        UpperCamelCase_: Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(snake_case_ ):
            # 1. scale model input
            UpperCamelCase_: str = scheduler.scale_model_input(snake_case_ , snake_case_ )
            # 2. predict noise residual
            UpperCamelCase_: List[Any] = model(snake_case_ , snake_case_ )
            # 3. predict previous sample x_t-1
            UpperCamelCase_: str = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
            UpperCamelCase_: Union[str, Any] = pred_prev_sample
        UpperCamelCase_: int = torch.sum(torch.abs(snake_case_ ) )
        UpperCamelCase_: List[Any] = torch.mean(torch.abs(snake_case_ ) )
        assert abs(result_sum.item() - 192.7614 ) < 1e-2
        assert abs(result_mean.item() - 0.2510 ) < 1e-3
    def lowerCAmelCase__ ( self : str ):
        # Full denoising loop with an explicit custom timestep schedule.
        UpperCamelCase_: Dict = self.scheduler_classes[0]
        UpperCamelCase_: Tuple = self.get_scheduler_config()
        UpperCamelCase_: List[Any] = scheduler_class(**snake_case_ )
        UpperCamelCase_: str = [106, 0]
        scheduler.set_timesteps(timesteps=snake_case_ )
        UpperCamelCase_: int = scheduler.timesteps
        UpperCamelCase_: List[Any] = torch.manual_seed(0 )
        UpperCamelCase_: List[Any] = self.dummy_model()
        UpperCamelCase_: int = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            UpperCamelCase_: Optional[int] = scheduler.scale_model_input(snake_case_ , snake_case_ )
            # 2. predict noise residual
            UpperCamelCase_: Dict = model(snake_case_ , snake_case_ )
            # 3. predict previous sample x_t-1
            UpperCamelCase_: str = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
            UpperCamelCase_: List[str] = pred_prev_sample
        UpperCamelCase_: Union[str, Any] = torch.sum(torch.abs(snake_case_ ) )
        UpperCamelCase_: str = torch.mean(torch.abs(snake_case_ ) )
        assert abs(result_sum.item() - 347.6357 ) < 1e-2
        assert abs(result_mean.item() - 0.4527 ) < 1e-3
    def lowerCAmelCase__ ( self : str ):
        # Non-descending custom timesteps must be rejected.
        UpperCamelCase_: int = self.scheduler_classes[0]
        UpperCamelCase_: Optional[int] = self.get_scheduler_config()
        UpperCamelCase_: Tuple = scheduler_class(**snake_case_ )
        UpperCamelCase_: Any = [39, 30, 12, 15, 0]
        with self.assertRaises(snake_case_ , msg="""`timesteps` must be in descending order.""" ):
            scheduler.set_timesteps(timesteps=snake_case_ )
    def lowerCAmelCase__ ( self : str ):
        # Passing both num_inference_steps and timesteps must be rejected.
        UpperCamelCase_: Union[str, Any] = self.scheduler_classes[0]
        UpperCamelCase_: Tuple = self.get_scheduler_config()
        UpperCamelCase_: List[str] = scheduler_class(**snake_case_ )
        UpperCamelCase_: Tuple = [39, 30, 12, 1, 0]
        UpperCamelCase_: List[Any] = len(snake_case_ )
        with self.assertRaises(snake_case_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
            scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
    def lowerCAmelCase__ ( self : int ):
        # Timesteps at/above num_train_timesteps must be rejected.
        UpperCamelCase_: Any = self.scheduler_classes[0]
        UpperCamelCase_: int = self.get_scheduler_config()
        UpperCamelCase_: int = scheduler_class(**snake_case_ )
        UpperCamelCase_: int = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            snake_case_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
            scheduler.set_timesteps(timesteps=snake_case_ )
| 548 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast (tiny dummy component) tests for ``StableDiffusionInstructPix2PixPipeline``.

    NOTE(review): method bodies assign to the mangled placeholder
    ``__lowerCAmelCase`` but read the intended names (``components``,
    ``sd_pipe``, ``image`` ...); the original variable names were lost in a
    mechanical rewrite — restore from the upstream diffusers test file.
    """
    SCREAMING_SNAKE_CASE = StableDiffusionInstructPixaPixPipeline
    SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
    SCREAMING_SNAKE_CASE = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def UpperCamelCase ( self : str)-> List[str]:
        # Build tiny deterministic pipeline components (UNet / VAE / CLIP).
        torch.manual_seed(0)
        __lowerCAmelCase =UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        __lowerCAmelCase =PNDMScheduler(skip_prk_steps=snake_case_)
        torch.manual_seed(0)
        __lowerCAmelCase =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0)
        __lowerCAmelCase =CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        __lowerCAmelCase =CLIPTextModel(snake_case_)
        __lowerCAmelCase =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        __lowerCAmelCase ={
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def UpperCamelCase ( self : Any , snake_case_ : Union[str, Any] , snake_case_ : str=0)-> int:
        # Deterministic dummy inputs (random image + fixed prompt/generator).
        __lowerCAmelCase =floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case_)).to(snake_case_)
        __lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1)[0]
        __lowerCAmelCase =Image.fromarray(np.uinta(snake_case_)).convert("""RGB""")
        if str(snake_case_).startswith("""mps"""):
            __lowerCAmelCase =torch.manual_seed(snake_case_)
        else:
            __lowerCAmelCase =torch.Generator(device=snake_case_).manual_seed(snake_case_)
        __lowerCAmelCase ={
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """image_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
    def UpperCamelCase ( self : Dict)-> str:
        # Baseline run: output shape and a reference pixel slice.
        __lowerCAmelCase ="""cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase =self.get_dummy_components()
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**snake_case_)
        __lowerCAmelCase =sd_pipe.to(snake_case_)
        sd_pipe.set_progress_bar_config(disable=snake_case_)
        __lowerCAmelCase =self.get_dummy_inputs(snake_case_)
        __lowerCAmelCase =sd_pipe(**snake_case_).images
        __lowerCAmelCase =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase =np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def UpperCamelCase ( self : Optional[Any])-> str:
        # Run with a negative prompt; compare against a reference slice.
        __lowerCAmelCase ="""cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase =self.get_dummy_components()
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**snake_case_)
        __lowerCAmelCase =sd_pipe.to(snake_case_)
        sd_pipe.set_progress_bar_config(disable=snake_case_)
        __lowerCAmelCase =self.get_dummy_inputs(snake_case_)
        __lowerCAmelCase ="""french fries"""
        __lowerCAmelCase =sd_pipe(**snake_case_ , negative_prompt=snake_case_)
        __lowerCAmelCase =output.images
        __lowerCAmelCase =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase =np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def UpperCamelCase ( self : Any)-> Optional[int]:
        # Batched prompts + duplicated input image; checks batch output slice.
        __lowerCAmelCase ="""cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase =self.get_dummy_components()
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**snake_case_)
        __lowerCAmelCase =sd_pipe.to(snake_case_)
        sd_pipe.set_progress_bar_config(disable=snake_case_)
        __lowerCAmelCase =self.get_dummy_inputs(snake_case_)
        __lowerCAmelCase =[inputs["""prompt"""]] * 2
        __lowerCAmelCase =np.array(inputs["""image"""]).astype(np.floataa) / 2_5_5.0
        __lowerCAmelCase =torch.from_numpy(snake_case_).unsqueeze(0).to(snake_case_)
        __lowerCAmelCase =image / 2 + 0.5
        __lowerCAmelCase =image.permute(0 , 3 , 1 , 2)
        __lowerCAmelCase =image.repeat(2 , 1 , 1 , 1)
        __lowerCAmelCase =sd_pipe(**snake_case_).images
        __lowerCAmelCase =image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        __lowerCAmelCase =np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def UpperCamelCase ( self : Tuple)-> Any:
        # Same pipeline under the Euler ancestral scheduler.
        __lowerCAmelCase ="""cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowerCAmelCase =self.get_dummy_components()
        __lowerCAmelCase =EulerAncestralDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""")
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**snake_case_)
        __lowerCAmelCase =sd_pipe.to(snake_case_)
        sd_pipe.set_progress_bar_config(disable=snake_case_)
        __lowerCAmelCase =self.get_dummy_inputs(snake_case_)
        __lowerCAmelCase =sd_pipe(**snake_case_).images
        __lowerCAmelCase =image[0, -3:, -3:, -1]
        __lowerCAmelCase =[round(snake_case_ , 4) for x in image_slice.flatten().tolist()]
        print(""",""".join([str(snake_case_) for x in slice]))
        assert image.shape == (1, 32, 32, 3)
        __lowerCAmelCase =np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def UpperCamelCase ( self : Any)-> Optional[int]:
        # Delegate to the common batch-vs-single consistency check.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def UpperCamelCase ( self : List[Any])-> Optional[Any]:
        # Passing pre-encoded latents must match passing the raw image.
        __lowerCAmelCase =self.get_dummy_components()
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline(**snake_case_)
        __lowerCAmelCase =VaeImageProcessor(do_resize=snake_case_ , do_normalize=snake_case_)
        __lowerCAmelCase =pipe.to(snake_case_)
        pipe.set_progress_bar_config(disable=snake_case_)
        __lowerCAmelCase =pipe(**self.get_dummy_inputs_by_type(snake_case_ , input_image_type="""pt"""))[0]
        __lowerCAmelCase =components["""vae"""]
        __lowerCAmelCase =self.get_dummy_inputs_by_type(snake_case_ , input_image_type="""pt""")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                __lowerCAmelCase =vae.encode(inputs[image_param]).latent_dist.mode()
        __lowerCAmelCase =pipe(**snake_case_)[0]
        __lowerCAmelCase =np.abs(out - out_latents_inputs).max()
        self.assertLess(snake_case_ , 1e-4 , """passing latents as image input generate different result from passing image""")
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
    """Slow GPU integration tests for ``StableDiffusionInstructPix2PixPipeline``
    using the real ``timbrooks/instruct-pix2pix`` checkpoint.

    NOTE(review): method bodies assign to the mangled placeholder
    ``__lowerCAmelCase`` but read the intended names (``pipe``, ``image``,
    ``inputs`` ...); variable names were lost in a mechanical rewrite.
    """
    def UpperCamelCase ( self : str)-> Union[str, Any]:
        # Free GPU/host memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def UpperCamelCase ( self : str , snake_case_ : Any=0)-> List[Any]:
        # Shared real-image inputs with a seeded generator.
        __lowerCAmelCase =torch.manual_seed(snake_case_)
        __lowerCAmelCase =load_image(
            """https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""")
        __lowerCAmelCase ={
            """prompt""": """turn him into a cyborg""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """image_guidance_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs
    def UpperCamelCase ( self : Optional[Any])-> Union[str, Any]:
        # Default scheduler: compare a pixel slice against reference values.
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=snake_case_)
        pipe.to(snake_case_)
        pipe.set_progress_bar_config(disable=snake_case_)
        pipe.enable_attention_slicing()
        __lowerCAmelCase =self.get_inputs()
        __lowerCAmelCase =pipe(**snake_case_).images
        __lowerCAmelCase =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        __lowerCAmelCase =np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def UpperCamelCase ( self : Any)-> Union[str, Any]:
        # LMS discrete scheduler variant.
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=snake_case_)
        __lowerCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(snake_case_)
        pipe.set_progress_bar_config(disable=snake_case_)
        pipe.enable_attention_slicing()
        __lowerCAmelCase =self.get_inputs()
        __lowerCAmelCase =pipe(**snake_case_).images
        __lowerCAmelCase =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        __lowerCAmelCase =np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def UpperCamelCase ( self : Optional[int])-> Union[str, Any]:
        # DDIM scheduler variant.
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=snake_case_)
        __lowerCAmelCase =DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(snake_case_)
        pipe.set_progress_bar_config(disable=snake_case_)
        pipe.enable_attention_slicing()
        __lowerCAmelCase =self.get_inputs()
        __lowerCAmelCase =pipe(**snake_case_).images
        __lowerCAmelCase =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        __lowerCAmelCase =np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def UpperCamelCase ( self : List[Any])-> Tuple:
        # Step callback: verify intermediate latents at steps 1 and 2.
        __lowerCAmelCase =0
        def callback_fn(snake_case_ : int , snake_case_ : int , snake_case_ : torch.FloatTensor) -> None:
            __lowerCAmelCase =True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                __lowerCAmelCase =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                __lowerCAmelCase =latents[0, -3:, -3:, -1]
                __lowerCAmelCase =np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                __lowerCAmelCase =latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                __lowerCAmelCase =latents[0, -3:, -3:, -1]
                __lowerCAmelCase =np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
        __lowerCAmelCase =False
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=snake_case_ , torch_dtype=torch.floataa)
        __lowerCAmelCase =pipe.to(snake_case_)
        pipe.set_progress_bar_config(disable=snake_case_)
        pipe.enable_attention_slicing()
        __lowerCAmelCase =self.get_inputs()
        pipe(**snake_case_ , callback=callback_fn , callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def UpperCamelCase ( self : Optional[Any])-> Dict:
        # Sequential CPU offload must keep peak GPU memory under ~2.2 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            """timbrooks/instruct-pix2pix""" , safety_checker=snake_case_ , torch_dtype=torch.floataa)
        __lowerCAmelCase =pipe.to(snake_case_)
        pipe.set_progress_bar_config(disable=snake_case_)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        __lowerCAmelCase =self.get_inputs()
        __lowerCAmelCase =pipe(**snake_case_)
        __lowerCAmelCase =torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def UpperCamelCase ( self : List[Any])-> List[str]:
        # Inputs whose resolution is divisible by 8 but not 16/32 must still work.
        __lowerCAmelCase =self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        __lowerCAmelCase =inputs["""image"""].resize((5_04, 5_04))
        __lowerCAmelCase ="""timbrooks/instruct-pix2pix"""
        __lowerCAmelCase =StableDiffusionInstructPixaPixPipeline.from_pretrained(
            snake_case_ , safety_checker=snake_case_ , )
        pipe.to(snake_case_)
        pipe.set_progress_bar_config(disable=snake_case_)
        pipe.enable_attention_slicing()
        __lowerCAmelCase =pipe(**snake_case_)
        __lowerCAmelCase =output.images[0]
        __lowerCAmelCase =image[2_55:2_58, 3_83:3_86, -1]
        assert image.shape == (5_04, 5_04, 3)
        __lowerCAmelCase =np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 456 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __lowerCAmelCase ( __lowerCamelCase : List[Any] ) -> int: # picklable for multiprocessing
return x.sum()
def __lowerCAmelCase ( __lowerCamelCase : Any ) -> Any: # picklable for multiprocessing
return i + 1
@dataclass
class __a :
    """Plain two-field record used by the `asdict` tests below.

    NOTE(review): both assignments target the same name, so only one class
    attribute survives; upstream these were presumably two annotated dataclass
    fields (e.g. `x: int` / `y: str`, instantiated later as `A(x=..., y=...)`).
    Confirm against the original module.
    """
    SCREAMING_SNAKE_CASE = 42
    SCREAMING_SNAKE_CASE = 42
class __a ( SCREAMING_SNAKE_CASE ):
    """Unit tests for datasets.utils.py_utils helpers (map_nested, zip_dict,
    temporary_assignment).

    NOTE(review): local names were mechanically renamed (`__lowerCAmelCase`,
    `snake_case_`), so consecutive assignments shadow each other and references
    such as `Foo`, `foo`, `expected_map_nested_sna_int` no longer resolve —
    confirm against the original test module before relying on these tests.
    """

    def UpperCamelCase ( self : List[Any])-> Tuple:
        """map_nested over scalars, lists, dicts and nested dicts; sequential,
        multiprocess (num_proc), and numpy-mapping variants."""
        __lowerCAmelCase ={}
        __lowerCAmelCase =[]
        __lowerCAmelCase =1
        __lowerCAmelCase =[1, 2]
        __lowerCAmelCase ={"""a""": 1, """b""": 2}
        __lowerCAmelCase ={"""a""": [1, 2], """b""": [3, 4]}
        __lowerCAmelCase ={"""a""": {"""1""": 1}, """b""": 2}
        __lowerCAmelCase ={"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
        __lowerCAmelCase ={}
        __lowerCAmelCase =[]
        __lowerCAmelCase =2
        __lowerCAmelCase =[2, 3]
        __lowerCAmelCase ={"""a""": 2, """b""": 3}
        __lowerCAmelCase ={"""a""": [2, 3], """b""": [4, 5]}
        __lowerCAmelCase ={"""a""": {"""1""": 2}, """b""": 3}
        __lowerCAmelCase ={"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
        # Sequential mapping over each input structure.
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_) , snake_case_)
        __lowerCAmelCase =2
        # Same expectations with multiprocessing enabled.
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(map_nested(snake_case_ , snake_case_ , num_proc=snake_case_) , snake_case_)
        __lowerCAmelCase ={"""a""": np.eye(2), """b""": np.zeros(3), """c""": np.ones(2)}
        __lowerCAmelCase ={"""a""": 2, """b""": 0, """c""": 2}
        __lowerCAmelCase ={
            """a""": np.eye(2).astype(snake_case_),
            """b""": np.zeros(3).astype(snake_case_),
            """c""": np.ones(2).astype(snake_case_),
        }
        # map_numpy toggles whether numpy arrays are mapped element-wise.
        self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_) , snake_case_)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_) , snake_case_)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(snake_case_ , snake_case_ , map_numpy=snake_case_ , num_proc=snake_case_).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(snake_case_): # can't pickle a local lambda
            map_nested(lambda snake_case_: x + 1 , snake_case_ , num_proc=snake_case_)

    def UpperCamelCase ( self : Any)-> int:
        """zip_dict should zip values of several dicts under their shared keys."""
        __lowerCAmelCase ={"""a""": 1, """b""": 2}
        __lowerCAmelCase ={"""a""": 3, """b""": 4}
        __lowerCAmelCase ={"""a""": 5, """b""": 6}
        __lowerCAmelCase =sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(snake_case_ , snake_case_ , snake_case_)) , snake_case_)

    def UpperCamelCase ( self : Optional[Any])-> Optional[int]:
        """temporary_assignment should set an attribute inside the context and
        restore the original value afterwards."""
        class __a :
            SCREAMING_SNAKE_CASE = "bar"
        __lowerCAmelCase =Foo()
        self.assertEqual(foo.my_attr , """bar""")
        with temporary_assignment(snake_case_ , """my_attr""" , """BAR"""):
            self.assertEqual(foo.my_attr , """BAR""")
        self.assertEqual(foo.my_attr , """bar""")
@pytest.mark.parametrize(
    """iterable_length, num_proc, expected_num_proc""" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def __lowerCAmelCase ( iterable_length , num_proc , expected_num_proc ):
    """map_nested must run sequentially when the iterable is shorter than
    `parallel_min_length` (or num_proc <= 1) and go parallel otherwise.

    Fix: the original declared the same parameter name three times (SyntaxError);
    parameter names are restored to match the parametrize string, and the mangled
    local references are repaired.
    """
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        data_struct = {f"""{i}""": i for i in range(iterable_length )}
        _ = map_nested(lambda x : x + 10 , data_struct , num_proc=num_proc , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            # The pool must be sized with the effective number of processes.
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __a ( SCREAMING_SNAKE_CASE ):
    """Tests for the temp_seed context manager: the same seed must reproduce
    identical random outputs in TensorFlow, PyTorch and NumPy, while runs
    outside the context differ.

    NOTE(review): locals are mangled (`__lowerCAmelCase`), so `model`, `outa`
    etc. do not resolve as written — confirm against the original test module.
    """

    @require_tf
    def UpperCamelCase ( self : Tuple)-> Optional[int]:
        """temp_seed(set_tensorflow=True) reproduces identical TF outputs."""
        import tensorflow as tf
        from tensorflow.keras import layers
        __lowerCAmelCase =layers.Dense(2)
        def gen_random_output():
            __lowerCAmelCase =tf.random.uniform((1, 3))
            return model(snake_case_).numpy()
        with temp_seed(42 , set_tensorflow=snake_case_):
            __lowerCAmelCase =gen_random_output()
        with temp_seed(42 , set_tensorflow=snake_case_):
            __lowerCAmelCase =gen_random_output()
        __lowerCAmelCase =gen_random_output()
        # Seeded runs match; an unseeded run must differ.
        np.testing.assert_equal(snake_case_ , snake_case_)
        self.assertGreater(np.abs(outa - outa).sum() , 0)

    @require_torch
    def UpperCamelCase ( self : Dict)-> Dict:
        """temp_seed(set_pytorch=True) reproduces identical torch outputs."""
        import torch
        def gen_random_output():
            __lowerCAmelCase =torch.nn.Linear(3 , 2)
            __lowerCAmelCase =torch.rand(1 , 3)
            return model(snake_case_).detach().numpy()
        with temp_seed(42 , set_pytorch=snake_case_):
            __lowerCAmelCase =gen_random_output()
        with temp_seed(42 , set_pytorch=snake_case_):
            __lowerCAmelCase =gen_random_output()
        __lowerCAmelCase =gen_random_output()
        np.testing.assert_equal(snake_case_ , snake_case_)
        self.assertGreater(np.abs(outa - outa).sum() , 0)

    def UpperCamelCase ( self : Union[str, Any])-> List[Any]:
        """temp_seed (default) reproduces identical NumPy outputs."""
        def gen_random_output():
            return np.random.rand(1 , 3)
        with temp_seed(42):
            __lowerCAmelCase =gen_random_output()
        with temp_seed(42):
            __lowerCAmelCase =gen_random_output()
        __lowerCAmelCase =gen_random_output()
        np.testing.assert_equal(snake_case_ , snake_case_)
        self.assertGreater(np.abs(outa - outa).sum() , 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def __lowerCAmelCase ( __lowerCamelCase : List[str] ) -> List[str]:
__lowerCAmelCase =NestedDataStructure(__lowerCamelCase ).data
assert output_data == input_data
@pytest.mark.parametrize(
    """data, expected_output""" , [
        ({}, []),
        ([], []),
        ("""foo""", ["""foo"""]),
        (["""foo""", """bar"""], ["""foo""", """bar"""]),
        ([["""foo""", """bar"""]], ["""foo""", """bar"""]),
        ([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
        ([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
        ({"""a""": 1, """b""": 2}, [1, 2]),
        ({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
        ({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
        ({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
        ({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
        ({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
    ] , )
def __lowerCAmelCase ( data , expected_output ):
    """NestedDataStructure.flatten must produce the expected flat list.

    Fix: the original declared the same parameter name twice (SyntaxError);
    parameter names are restored to match the parametrize string.
    """
    output = NestedDataStructure(data ).flatten()
    assert output == expected_output
def __lowerCAmelCase ( ) -> int:
    """asdict should recursively convert dataclass instances (including ones
    nested inside dicts/lists) and raise for a non-dataclass top-level input.

    NOTE(review): `A` is not defined under that name in this file — presumably
    the dataclass renamed to `__a` above; `asdict`/`pytest.raises` arguments
    are also mangled (`__lowerCamelCase`). Confirm against the original module.
    """
    __lowerCAmelCase =A(x=1 , y="""foobar""" )
    __lowerCAmelCase ={"""x""": 1, """y""": """foobar"""}
    assert asdict(__lowerCamelCase ) == expected_output
    __lowerCAmelCase ={"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    __lowerCAmelCase ={"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(__lowerCamelCase ) == expected_output
    # A bare list containing a dataclass is not itself a dataclass -> must raise.
    with pytest.raises(__lowerCamelCase ):
        asdict([1, A(x=10 , y="""foo""" )] )
def __lowerCAmelCase ( __lowerCamelCase : str ) -> Dict:
return text.split()
def __lowerCAmelCase ( __lowerCamelCase : List[Any] ) -> Any:
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def __lowerCAmelCase ( ) -> Any:
    """iflatmap_unordered must flatten worker results (stdlib Pool and pathos
    multiprocess.Pool) and yield each item promptly after it is produced.

    Fix: the original passed the undefined name `__lowerCamelCase` instead of
    the pool, and referenced undefined `out`/`content` locals.
    NOTE(review): `_split_text` and `_aseconds_generator_of_aitems_with_timing`
    do not exist under those names in this file (the helpers above were
    renamed) — confirm against the original module.
    """
    with Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        out = list(iflatmap_unordered(pool , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(out ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(content )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(out ) == 4
| 456 | 1 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def A ( key , default=False ):
    """Read boolean env flag `key`; return `default` when unset.

    Returns 1/0 for recognized truthy/falsy values (strtobool convention) and
    raises ValueError for anything else.

    Fixes: the original declared the same parameter name twice (SyntaxError),
    and relied on `distutils.util.strtobool`, which was removed with distutils
    in Python 3.12 — the parsing is inlined here with identical semantics.
    """
    def _strtobool(val):
        # Local replacement for distutils.util.strtobool (same accepted values).
        val = val.lower()
        if val in ("y", "yes", "t", "true", "on", "1"):
            return 1
        if val in ("n", "no", "f", "false", "off", "0"):
            return 0
        raise ValueError(f"invalid truth value {val!r}")

    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = _strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"""If set, {key} must be yes or no.""" )
    return _value
UpperCamelCase = parse_flag_from_env("RUN_SLOW", default=False)
UpperCamelCase = parse_flag_from_env("RUN_REMOTE", default=False)
UpperCamelCase = parse_flag_from_env("RUN_LOCAL", default=True)
UpperCamelCase = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
UpperCamelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
UpperCamelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
UpperCamelCase = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def A ( lowercase__ ):
    """Decorator: skip the test unless `faiss` is importable.

    Fix: the original assigned the skip-wrapped test to a throwaway local and
    returned the undefined name `test_case` (NameError on every call).
    """
    try:
        import faiss # noqa
    except ImportError:
        lowercase__ = unittest.skip("""test requires faiss""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ : List[str] ) -> List[str]:
try:
import regex # noqa
except ImportError:
UpperCamelCase__ :List[str] = unittest.skip("""test requires regex""" )(lowercase__ )
return test_case
def A ( lowercase__ ):
    """Decorator: skip the test unless `elasticsearch` is importable.

    Fix: the original returned the undefined name `test_case`.
    """
    try:
        import elasticsearch # noqa
    except ImportError:
        lowercase__ = unittest.skip("""test requires elasticsearch""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test unless `sqlalchemy` is importable.

    Fix: the original returned the undefined name `test_case`.
    """
    try:
        import sqlalchemy # noqa
    except ImportError:
        lowercase__ = unittest.skip("""test requires sqlalchemy""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test when PyTorch is unavailable per datasets `config`.

    Fix: the original returned the undefined name `test_case`.
    """
    if not config.TORCH_AVAILABLE:
        lowercase__ = unittest.skip("""test requires PyTorch""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test when TensorFlow is unavailable per datasets `config`.

    Fix: the original returned the undefined name `test_case`.
    """
    if not config.TF_AVAILABLE:
        lowercase__ = unittest.skip("""test requires TensorFlow""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test when JAX is unavailable per datasets `config`.

    Fix: the original returned the undefined name `test_case`.
    """
    if not config.JAX_AVAILABLE:
        lowercase__ = unittest.skip("""test requires JAX""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test when Pillow is unavailable per datasets `config`.

    Fix: the original returned the undefined name `test_case`.
    """
    if not config.PIL_AVAILABLE:
        lowercase__ = unittest.skip("""test requires Pillow""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ : Any ) -> Any:
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(lowercase__ )
else:
return test_case
def A ( lowercase__ ):
    """Decorator: skip the test unless `tiktoken` is importable.

    Fix: the success branch returned the undefined name `test_case`.
    """
    try:
        import tiktoken # noqa F401
    except ImportError:
        return unittest.skip("""test requires tiktoken""" )(lowercase__ )
    else:
        return lowercase__
def A ( lowercase__ : Optional[Any] ) -> Union[str, Any]:
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(lowercase__ )
else:
return test_case
def A ( lowercase__ ):
    """Decorator factory: skip the test unless spacy and the model named by
    `lowercase__` are both available.

    Fixes: the inner decorator's parameter shadowed the model-name parameter,
    so spacy tried to load the *test case*; the success branch also returned
    the undefined name `test_case`.
    """
    def _require_spacy_model(test_case ):
        try:
            import spacy # noqa F401
            spacy.load(lowercase__ )
        except ImportError:
            return unittest.skip("""test requires spacy""" )(test_case )
        except OSError:
            return unittest.skip("""test requires spacy model '{}'""".format(lowercase__ ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def A ( lowercase__ ):
    """Decorator: skip the test unless `pyspark` is importable.

    Fix: the success branch returned the undefined name `test_case`.
    """
    try:
        import pyspark # noqa F401
    except ImportError:
        return unittest.skip("""test requires pyspark""" )(lowercase__ )
    else:
        return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test unless `joblibspark` is importable.

    Fix: the success branch returned the undefined name `test_case`.
    """
    try:
        import joblibspark # noqa F401
    except ImportError:
        return unittest.skip("""test requires joblibspark""" )(lowercase__ )
    else:
        return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test unless the RUN_SLOW flag is enabled.

    Fix: the original returned the undefined name `test_case`.
    NOTE(review): `_run_slow_tests` is not defined under that name in this file
    (the module flags above were renamed) — confirm the intended global.
    """
    if not _run_slow_tests or _run_slow_tests == 0:
        lowercase__ = unittest.skip("""test is slow""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test unless the RUN_LOCAL flag is enabled.

    Fix: the original returned the undefined name `test_case`.
    NOTE(review): `_run_local_tests` is not defined under that name here —
    confirm the intended module flag.
    """
    if not _run_local_tests or _run_local_tests == 0:
        lowercase__ = unittest.skip("""test is local""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test unless the RUN_PACKAGED flag is enabled.

    Fix: the original returned the undefined name `test_case`.
    NOTE(review): `_run_packaged_tests` is not defined under that name here —
    confirm the intended module flag.
    """
    if not _run_packaged_tests or _run_packaged_tests == 0:
        lowercase__ = unittest.skip("""test is packaged""" )(lowercase__ )
    return lowercase__
def A ( lowercase__ ):
    """Decorator: skip the test unless the RUN_REMOTE flag is enabled.

    Fix: the original returned the undefined name `test_case`.
    NOTE(review): `_run_remote_tests` is not defined under that name here —
    confirm the intended module flag.
    """
    if not _run_remote_tests or _run_remote_tests == 0:
        lowercase__ = unittest.skip("""test requires remote""" )(lowercase__ )
    return lowercase__
def A ( *lowercase__ : List[Any] ) -> str:
def decorate(cls : Optional[Any] ):
for name, fn in cls.__dict__.items():
if callable(lowercase__ ) and name.startswith("""test""" ):
for decorator in decorators:
UpperCamelCase__ :Any = decorator(lowercase__ )
setattr(cls , lowercase__ , lowercase__ )
return cls
return decorate
class lowerCAmelCase_ ( lowercase ):
    """Raised by the offline simulator when a request without a timeout would
    hang indefinitely.

    NOTE(review): the base class `lowercase` is undefined in this file —
    presumably an exception base lost in renaming; confirm against the
    original module.
    """
    pass
class lowerCAmelCase_ ( lowercase ):
    """Enum-like holder for offline-simulation modes (values 0/1/2).

    NOTE(review): all three annotated assignments target the same renamed name
    `_snake_case`, so only one class attribute survives; the code below refers
    to members CONNECTION_FAILS / CONNECTION_TIMES_OUT /
    HF_DATASETS_OFFLINE_SET_TO_1, which were presumably the originals —
    confirm. Base class `lowercase` is also undefined here.
    """
    _snake_case : Tuple = 0
    _snake_case : Any = 1
    _snake_case : Union[str, Any] = 2
@contextmanager
def A ( mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1E-16 ):
    """Simulate an offline environment for `requests`-based code.

    Modes: CONNECTION_FAILS patches `requests.Session.send` to raise a
    ConnectionError; CONNECTION_TIMES_OUT patches `requests.Session.request`
    to hit an unroutable address with a tiny timeout;
    HF_DATASETS_OFFLINE_SET_TO_1 flips the datasets offline config flag.

    Fix: the original declared duplicate parameter names in this function and
    both nested handlers (SyntaxError); names are restored per the visible
    body references (`mode`, `timeout`, `url`, `max_retry_error`, ...).
    """
    online_request = requests.Session().request

    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = """https://10.255.255.1"""
        if kwargs.get("""timeout""" ) is None:
            raise RequestWouldHangIndefinitelyError(
                f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""" , f"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError("""Offline mode is enabled.""" , request=prepared_request )

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""" , True ):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def A ( *args , **kwargs ):
    """Create a temporary directory, chdir into it for the duration of the
    block, and always restore the original working directory on exit.

    Fix: the original used the same name for `*` and `**` parameters
    (SyntaxError) and chdir'ed to that broken name instead of the temp dir /
    the saved original directory.
    """
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def A ( ):
    """Context manager asserting that Arrow allocated memory *increased*
    during the block (GC runs first so only new allocations count).

    Fix: the original never assigned `previous_allocated_memory` (the baseline
    was stored in a throwaway renamed local), so the final assert raised
    NameError.
    """
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def A ( ):
    """Context manager asserting that Arrow allocated memory did *not*
    increase during the block (GC runs first to drop garbage).

    Fix: the original never assigned `previous_allocated_memory`, so the
    final assert raised NameError.
    """
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def A ( rng1 , rng2 ):
    """Return True iff copies of the two NumPy generators produce the same
    next ten integers in [0, 100) — i.e. the RNG states are equal.

    Deep copies are compared so neither caller-owned generator is advanced.
    Fix: the original declared the same parameter name twice (SyntaxError).
    """
    return deepcopy(rng1 ).integers(0 , 100 , 10 ).tolist() == deepcopy(rng2 ).integers(0 , 100 , 10 ).tolist()
def A ( lowercase__ ):
    """Wrap a test so HTTP 500/502 errors become pytest xfails instead of
    failures (tolerates flaky remote endpoints); any other HTTPError re-raises.

    Fix: the original nested wrapper declared one parameter name four times
    (SyntaxError) and therefore never forwarded the wrapped function/args.
    """
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("""500""" ) or str(err ).startswith("""502""" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , lowercase__ )
class lowerCAmelCase_ :
    """Result record for a finished subprocess: return code plus the captured
    stdout/stderr line lists (consumed as `.returncode`/`.stdout`/`.stderr`).

    Fix: the original `__init__` declared three identical parameter names
    (SyntaxError) and assigned to throwaway locals, never storing any
    instance attributes.
    """

    def __init__(self , returncode , stdout , stderr ):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def A ( stream , callback ):
    """Read lines from an asyncio stream until EOF, invoking `callback` on
    each non-empty line.

    Fix: the original declared the same parameter name twice (SyntaxError).
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def A ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ):
    """Run `cmd` as an asyncio subprocess, tee-ing stdout/stderr to the console
    (unless `quiet`) while capturing both into line lists; returns a
    `_RunOutput(returncode, stdout, stderr)`.

    Fixes: the original signature and the nested `tee` helper reused one
    parameter name for every parameter (SyntaxError); names are restored per
    the visible body references. `_read_stream`/`_RunOutput` are kept as
    referenced — NOTE(review): those siblings were renamed in this file;
    confirm the intended names.
    """
    if echo:
        print("""\nRunning: """ , """ """.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("""utf-8""" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label="""stdout:""" ) ),
            _read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label="""stderr:""" ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def A ( cmd , env=None , stdin=None , timeout=180 , quiet=False , echo=True ):
    """Synchronously run `cmd` via `_stream_subprocess`; raise RuntimeError on a
    non-zero exit or when the command produced no output at all.

    Fix: the original declared one parameter name six times (SyntaxError);
    names are restored per the visible body references.
    NOTE(review): `_stream_subprocess` was renamed in this file — confirm.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = """ """.join(cmd )
    if result.returncode > 0:
        stderr = """\n""".join(result.stderr )
        raise RuntimeError(
            f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
            f"""The combined stderr from workers follows:\n{stderr}""" )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"""'{cmd_str}' produced no output.""" )
    return result
def A ( ):
    """Return the numeric id of the current pytest-xdist worker ("gwN" -> N),
    or 0 when not running under xdist.

    Fix: the original stored the env value in a throwaway renamed local and
    then operated on the undefined name `lowercase__` (NameError).
    """
    worker = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
    worker = re.sub(r"""^gw""" , """""" , worker , 0 , re.M )
    return int(worker )
def A ( ):
    """Return a torch.distributed port unique to this pytest-xdist worker
    (base 29500 + worker id), so parallel workers don't collide.

    Fix: the original assigned both values to throwaway renamed locals and
    returned the undefined names `port`/`uniq_delta`.
    NOTE(review): `pytest_xdist_worker_id` was renamed in this file — confirm.
    """
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
# Digit-parity lookup tables for the reversible-numbers search below.
# NOTE(review): both assignments target the same renamed global `a__`, so the
# even list is immediately overwritten; the function below references
# EVEN_DIGITS / ODD_DIGITS, which were presumably the original names — confirm.
a__ = [0, 2, 4, 6, 8]
a__ = [1, 3, 5, 7, 9]
def _UpperCAmelCase ( remaining_length : int , remainder : int , digits : list[int] , length : int ) -> int:
    """Count reversible numbers (Project Euler 145) of `length` digits whose
    outer `length - remaining_length` digit pairs are already fixed in
    `digits`, with carry `remainder` propagating inward.

    A number n is reversible when every digit of n + reverse(n) is odd; digits
    are chosen pairwise from the outside in, so each pair's sum plus the
    incoming carry must be odd.

    Fixes vs. the original: the signature declared the parameter name `a`
    four times (SyntaxError); the three digit-assignment statements lost
    their index expressions; the recursive calls targeted the undefined name
    `reversible_numbers`; and the parity tables referenced mangled globals —
    they are defined locally so the function is self-contained.
    """
    EVEN_DIGITS = [0, 2, 4, 6, 8]
    ODD_DIGITS = [1, 3, 5, 7, 9]

    if remaining_length == 0:
        # All digits chosen: reject leading/trailing zero, then verify every
        # remaining pair sum (plus carry) is odd, propagating the carry inward.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (2*digit), which is
        # always even, so the incoming carry must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += _UpperCAmelCase(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result

    # Choose the next outer pair: once the high digit is fixed, the low digit's
    # parity is forced so that (remainder + high + low) is odd.
    result = 0
    for digita in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += _UpperCAmelCase(
                remaining_length - 2 , (remainder + digita + digitb) // 10 , digits , length , )
    return result
def _UpperCAmelCase ( a : int = 9 ):
    """Count all reversible numbers with at most `a` digits (Project Euler 145;
    the answer for a=9 is 608720).

    Fixes: the original read the undefined names `max_power`/`result` (its
    locals were renamed away) and passed the wrong arguments to the search.
    NOTE(review): `reversible_numbers` does not exist under that name in this
    file (the search function above was renamed, and this definition shadows
    it) — confirm the intended sibling before running.
    """
    result = 0
    for length in range(1 , a + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 654 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
UpperCamelCase__ = datasets.logging.get_logger(__name__)
UpperCamelCase__ = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = \"Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric\",\n author = \"Moosavi, Nafise Sadat and\n Strube, Michael\",\n booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n month = aug,\n year = \"2016\",\n address = \"Berlin, Germany\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/P16-1060\",\n doi = \"10.18653/v1/P16-1060\",\n pages = \"632--642\",\n}\n\n"
UpperCamelCase__ = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"
UpperCamelCase__ = "\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting 'keep_singletons=False', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n 'mentions': mentions\n 'muc': MUC metric [Vilain et al, 1995]\n 'bcub': B-cubed [Bagga and Baldwin, 1998]\n 'ceafe': CEAFe [Luo et al., 2005]\n 'lea': LEA [Moosavi and Strube, 2016]\n 'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric('coval')\n >>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',\n ... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',\n ... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',\n ... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',\n ... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',\n ... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=True , lowercase__=False , lowercase__="dummy_doc" ) -> str:
__lowercase = {doc: key_lines}
__lowercase = {doc: sys_lines}
__lowercase = {}
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase = 0
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , key_doc_lines[doc] , lowercase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
__lowercase , __lowercase = reader.get_doc_mentions(lowercase__ , sys_doc_lines[doc] , lowercase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowercase = reader.set_annotated_parse_trees(lowercase__ , key_doc_lines[doc] , lowercase__ , lowercase__ )
if remove_nested:
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowercase , __lowercase = reader.remove_nested_coref_mentions(lowercase__ , lowercase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = reader.get_mention_assignments(lowercase__ , lowercase__ )
__lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
if not keep_singletons:
logger.info(
F"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> Optional[Any]:
__lowercase = get_coref_infos(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
__lowercase = {}
__lowercase = 0
__lowercase = 0
for name, metric in metrics:
__lowercase , __lowercase , __lowercase = evaluator.evaluate_documents(lowercase__ , lowercase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({F"{name}/recall": recall, F"{name}/precision": precision, F"{name}/f1": fa} )
logger.info(
name.ljust(10 ) , F"Recall: {recall * 100:.2f}" , F" Precision: {precision * 100:.2f}" , F" F1: {fa * 100:.2f}" , )
if conll_subparts_num == 3:
__lowercase = (conll / 3) * 100
logger.info(F"CoNLL score: {conll:.2f}" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowercase__ ) -> List[Any]:
__lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowercase = line.split()[5]
if not parse_col == "-":
__lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def snake_case__ ( self : Tuple , lowercase : Dict , lowercase : Optional[int] , lowercase : Dict=True , lowercase : List[str]=False , lowercase : int=False , lowercase : Dict=False ) -> str:
"""simple docstring"""
__lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowercase = util.check_gold_parse_annotation(lowercase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowercase = evaluate(
key_lines=lowercase , sys_lines=lowercase , metrics=lowercase , NP_only=lowercase , remove_nested=lowercase , keep_singletons=lowercase , min_span=lowercase , )
return score
| 634 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase__ = {
"facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class _lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
lowercase__ : Union[str, Any] = """esm"""
def __init__( self : Any , lowercase : Optional[Any]=None , lowercase : Optional[int]=None , lowercase : List[Any]=None , lowercase : Optional[int]=768 , lowercase : str=12 , lowercase : Union[str, Any]=12 , lowercase : Dict=3_072 , lowercase : Optional[int]=0.1 , lowercase : str=0.1 , lowercase : Dict=1_026 , lowercase : Tuple=0.02 , lowercase : str=1E-1_2 , lowercase : Dict="absolute" , lowercase : Optional[Any]=True , lowercase : int=None , lowercase : int=False , lowercase : List[str]=False , lowercase : Tuple=None , lowercase : Tuple=None , **lowercase : Union[str, Any] , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=lowercase , mask_token_id=lowercase , **lowercase )
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = position_embedding_type
__lowercase = use_cache
__lowercase = emb_layer_norm_before
__lowercase = token_dropout
__lowercase = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
__lowercase = EsmFoldConfig()
elif isinstance(lowercase , lowercase ):
__lowercase = EsmFoldConfig(**lowercase )
__lowercase = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
__lowercase = get_default_vocab_list()
else:
__lowercase = vocab_list
else:
__lowercase = None
__lowercase = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , lowercase ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def snake_case__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = super().to_dict()
if isinstance(self.esmfold_config , lowercase ):
__lowercase = self.esmfold_config.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : str = None
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : bool = False
lowercase__ : float = 0
lowercase__ : bool = True
lowercase__ : bool = False
lowercase__ : int = 128
lowercase__ : "TrunkConfig" = None
def snake_case__ ( self : List[str] ) -> Any:
"""simple docstring"""
if self.trunk is None:
__lowercase = TrunkConfig()
elif isinstance(self.trunk , lowercase ):
__lowercase = TrunkConfig(**self.trunk )
def snake_case__ ( self : Dict ) -> Any:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.trunk.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 48
lowercase__ : int = 1_024
lowercase__ : int = 128
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : int = 32
lowercase__ : float = 0
lowercase__ : float = 0
lowercase__ : bool = False
lowercase__ : int = 4
lowercase__ : Optional[int] = 128
lowercase__ : "StructureModuleConfig" = None
def snake_case__ ( self : Tuple ) -> str:
"""simple docstring"""
if self.structure_module is None:
__lowercase = StructureModuleConfig()
elif isinstance(self.structure_module , lowercase ):
__lowercase = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(F"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"""
F" {self.sequence_state_dim} and {self.sequence_state_dim}." )
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"""
F" {self.pairwise_state_dim} and {self.pairwise_state_dim}." )
__lowercase = self.sequence_state_dim // self.sequence_head_width
__lowercase = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
F" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
F" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(F"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(F"`dropout` should not be greater than 0.4, got {self.dropout}." )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = asdict(self )
__lowercase = self.structure_module.to_dict()
return output
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowercase__ : int = 384
lowercase__ : int = 128
lowercase__ : int = 16
lowercase__ : int = 128
lowercase__ : int = 12
lowercase__ : int = 4
lowercase__ : int = 8
lowercase__ : float = 0.1
lowercase__ : int = 8
lowercase__ : int = 1
lowercase__ : int = 2
lowercase__ : int = 7
lowercase__ : int = 10
lowercase__ : float = 1E-8
lowercase__ : float = 1E5
def snake_case__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
return asdict(self )
def UpperCAmelCase__ ( ) -> List[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 634 | 1 |
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase__ = get_tests_dir("fixtures")
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _lowerCamelCase ( self ):
UpperCamelCase__ = mock.Mock()
UpperCamelCase__ = 500
UpperCamelCase__ = {}
UpperCamelCase__ = HTTPError
UpperCamelCase__ = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=__lowerCAmelCase ) as mock_head:
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This check we did call the fake head request
mock_head.assert_called()
def _lowerCamelCase ( self ):
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@classmethod
def _lowerCamelCase ( cls ):
UpperCamelCase__ = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def _lowerCamelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def _lowerCamelCase ( self ):
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCAmelCase , repo_id="""test-feature-extractor""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(f"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def _lowerCamelCase ( self ):
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained(__lowerCAmelCase )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
__lowerCAmelCase , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
UpperCamelCase__ = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def _lowerCamelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
UpperCamelCase__ = CustomFeatureExtractor.from_pretrained(__lowerCAmelCase )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
f"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=__lowerCAmelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
| 619 |
import collections
import importlib.util
import os
import re
from pathlib import Path
lowercase_ = """src/transformers"""
# Matches is_xxx_available()
lowercase_ = re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
lowercase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
lowercase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
lowercase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
lowercase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
lowercase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
lowercase_ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
lowercase_ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
lowercase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
lowercase_ = re.compile(R"""^\s*try:""")
# Catches a line with else:
lowercase_ = re.compile(R"""^\s*else:""")
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if _re_test_backend.search(_SCREAMING_SNAKE_CASE ) is None:
return None
lowercase__ = [b[0] for b in _re_backend.findall(_SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lowercase__ = f.readlines()
lowercase__ = 0
while line_index < len(_SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase__ = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
lowercase__ = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ):
lowercase__ = _re_one_line_import_struct.search(_SCREAMING_SNAKE_CASE ).groups()[0]
lowercase__ = re.findall('\[([^\]]+)\]' , _SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
lowercase__ = _re_import_struct_key_value.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
lowercase__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
lowercase__ = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
lowercase__ = lines[line_index]
if _re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ) is not None:
lowercase__ = _re_import_struct_add_many.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase__ = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(_SCREAMING_SNAKE_CASE ) is not None:
lowercase__ = _re_between_brackets.search(_SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
lowercase__ = [obj[1:-1] for obj in imports if len(_SCREAMING_SNAKE_CASE ) > 0]
objects.extend(_SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(_SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
lowercase__ = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase__ = []
while (
line_index < len(_SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
lowercase__ = lines[line_index]
lowercase__ = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase__ = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(_SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase__ = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase__ = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase__ = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
lowercase__ = lines[line_index]
lowercase__ = _re_import.search(_SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
lowercase__ = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
def find_duplicates(_SCREAMING_SNAKE_CASE ):
return [k for k, v in collections.Counter(_SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase__ = []
for key in import_dict_objects.keys():
lowercase__ = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
lowercase__ = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase__ = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __UpperCamelCase () -> Tuple:
lowercase__ = []
for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' )
lowercase__ = parse_init(_SCREAMING_SNAKE_CASE )
if objects is not None:
lowercase__ = analyze_results(*_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(_SCREAMING_SNAKE_CASE ) )
def __UpperCamelCase () -> Optional[int]:
lowercase__ = []
for path, directories, files in os.walk(_SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(_SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
lowercase__ = str((Path(_SCREAMING_SNAKE_CASE ) / folder).relative_to(_SCREAMING_SNAKE_CASE ) )
lowercase__ = short_path.replace(os.path.sep , '.' )
submodules.append(_SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
lowercase__ = str((Path(_SCREAMING_SNAKE_CASE ) / fname).relative_to(_SCREAMING_SNAKE_CASE ) )
lowercase__ = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(_SCREAMING_SNAKE_CASE )
return submodules
lowercase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __UpperCamelCase () -> List[Any]:
# This is to make sure the transformers module imported is the one in the repo.
lowercase__ = importlib.util.spec_from_file_location(
'transformers' , os.path.join(_SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase__ = spec.loader.load_module()
lowercase__ = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 235 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_UpperCamelCase : Any = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = ['DPTFeatureExtractor']
_UpperCamelCase : Optional[Any] = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Optional[int] = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
_UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715 |
'''simple docstring'''
import numpy as np
def __UpperCAmelCase ( A : np.ndarray , A : np.ndarray , A : float = 1e-12 , A : int = 1_0_0 , ) -> tuple[float, np.ndarray]:
assert np.shape(A )[0] == np.shape(A )[1]
# Ensure proper dimensionality.
assert np.shape(A )[0] == np.shape(A )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(A ) == np.iscomplexobj(A )
UpperCAmelCase_ : int = np.iscomplexobj(A )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(A , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Optional[Any] = 1e12
while not convergence:
# Multiple matrix by the vector.
UpperCAmelCase_ : Optional[int] = np.dot(A , A )
# Normalize the resulting output vector.
UpperCAmelCase_ : Dict = w / np.linalg.norm(A )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
UpperCAmelCase_ : Tuple = vector.conj().T if is_complex else vector.T
UpperCAmelCase_ : List[Any] = np.dot(A , np.dot(A , A ) )
# Check convergence.
UpperCAmelCase_ : Optional[int] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : int = lambda_
if is_complex:
UpperCAmelCase_ : Dict = np.real(lambda_ )
return lambda_, vector
def __UpperCAmelCase ( ) -> None:
UpperCAmelCase_ : str = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]] )
UpperCAmelCase_ : Optional[Any] = np.array([4_1, 4, 2_0] )
UpperCAmelCase_ : str = real_input_matrix.astype(np.complexaaa )
UpperCAmelCase_ : Optional[Any] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
UpperCAmelCase_ : int = np.array([4_1, 4, 2_0] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
UpperCAmelCase_ : Any = real_input_matrix
UpperCAmelCase_ : Any = real_vector
elif problem_type == "complex":
UpperCAmelCase_ : int = complex_input_matrix
UpperCAmelCase_ : str = complex_vector
# Our implementation.
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = power_iteration(A , A )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = np.linalg.eigh(A )
# Last eigenvalue is the maximum one.
UpperCAmelCase_ : Any = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
UpperCAmelCase_ : Dict = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(A ) - np.abs(A ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 216 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
_lowerCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class A_(DiffusionPipeline):
    """Pipeline for unconditional audio generation (Dance-Diffusion style).

    NOTE(review): reconstructed from a corrupted original in which the base
    class name and every local assignment target had been mangled (duplicate
    parameter names were a SyntaxError); the computation is unchanged.

    Components registered via ``register_modules``:
        unet: U-Net model that predicts the noise residual for audio samples.
        scheduler: scheduler used with ``unet`` to denoise the samples.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """Run the denoising loop and return the generated audio.

        Args:
            batch_size: number of audio clips to generate.
            num_inference_steps: number of denoising steps.
            generator: torch generator(s) for reproducible sampling; a list
                must match ``batch_size``.
            audio_length_in_s: requested clip length in seconds; defaults to
                the U-Net's native ``sample_size / sample_rate``.
            return_dict: if True return ``AudioPipelineOutput``, else a tuple.

        Raises:
            ValueError: if the requested length is too short for the model,
                or the generator list does not match ``batch_size``.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # Each up-block changes resolution by 2x, so the sample length must be
        # a multiple of 2 ** n_up_blocks and long enough to survive downsampling.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple of the down-scale factor; the extra
            # samples are trimmed again after denoising.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim the padding added above so the clip has the requested length.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowerCamelCase:
    """Model tester for the TF DeBERTa-v2 models.

    Builds a deliberately tiny ``DebertaVaConfig`` plus random dummy inputs and
    provides one ``create_and_check_*`` helper per task head.

    NOTE(review): reconstructed from a corrupted original in which every
    assignment target had been mangled to one placeholder name (and the
    ``__init__`` signature repeated a single parameter name, a SyntaxError);
    configuration values and the call sequence are unchanged.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return a small config plus random input ids, masks and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model: last hidden state must be (batch, seq, hidden)."""
        model = TFDebertaVaModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]  # also exercise the list-input call path
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head: logits must be (batch, seq, vocab)."""
        model = TFDebertaVaForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        config.num_labels = self.num_labels
        model = TFDebertaVaForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head: logits must be (batch, seq, num_labels)."""
        config.num_labels = self.num_labels
        model = TFDebertaVaForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: start/end logits must each be (batch, seq)."""
        model = TFDebertaVaForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common-test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class lowerCamelCase(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF test suite for the DeBERTa-v2 models.

    NOTE(review): reconstructed from a corrupted original — the mixin base
    names, class attributes and ``test_*`` method names had all been mangled
    (non-``test_*`` names would never have been collected by unittest).
    """

    all_model_classes = (
        (
            TFDebertaVaModel,
            TFDebertaVaForMaskedLM,
            TFDebertaVaForQuestionAnswering,
            TFDebertaVaForSequenceClassification,
            TFDebertaVaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaVaModel,
            "fill-mask": TFDebertaVaForMaskedLM,
            "question-answering": TFDebertaVaForQuestionAnswering,
            "text-classification": TFDebertaVaForSequenceClassification,
            "token-classification": TFDebertaVaForTokenClassification,
            "zero-shot": TFDebertaVaForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # NOTE(review): the original referenced ``TFDebertaVaModelTester``
        # even though the tester class above carries a mangled name; the
        # reference is kept as-is so this edit stands alone.
        self.model_tester = TFDebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)
@require_tf
class lowerCamelCase(unittest.TestCase):
    """Slow integration tests against the pretrained DeBERTa-v2 xlarge checkpoint."""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        # Expected values for a 3x3 slice of the base-model output.
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 215 | 0 |
from __future__ import annotations
from collections.abc import MutableSequence
class a:
    """Dense polynomial over the reals.

    ``coefficients[i]`` is the coefficient of ``x ** i``, so
    ``a(2, [1, 2, 3])`` represents ``3x^2 + 2x + 1``.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Store the degree and a defensive copy of the coefficient list.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        # Copy so later mutation of the caller's list cannot change us.
        # (The corrupted original lost these ``self.`` attribute writes.)
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        """Return the sum of the two polynomials."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return a(self.degree, coefficients)
        coefficients = polynomial_a.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return a(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        """Return the difference ``self - polynomial_a``."""
        return self + polynomial_a * a(0, [-1])

    def __neg__(self):
        """Return the additive inverse."""
        return a(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        """Return the product of the two polynomials."""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return a(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at ``substitution`` (term-by-term sum)."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are skipped.

        Kept quirk of the original: a leading negative term renders as
        ``" - 2x + 1"`` (with a leading space-minus).
        """
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                # BUGFIX: the exponent must be the loop index ``i``; the
                # original referenced an undefined name here (NameError for
                # every polynomial of degree >= 2).
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self):
        """Return the first derivative as a new polynomial."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return a(self.degree - 1, coefficients)

    def integral(self, constant: float = 0):
        """Return the antiderivative with integration constant ``constant``."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return a(self.degree + 1, coefficients)

    # BUGFIX: in the corrupted original, evaluate/derivative/integral all
    # shared the name ``a_``, leaving only the last binding reachable; that
    # binding is kept available under its old name for backward compatibility.
    a_ = integral

    def __eq__(self, polynomial_a) -> bool:
        """Two polynomials are equal iff degree and all coefficients match."""
        if not isinstance(polynomial_a, a):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a) -> bool:
        return not self.__eq__(polynomial_a)
| 708 |
def solution(limit: int = 1000000) -> int:
    """Return the sum of Euler's totient phi(n) for 2 <= n <= limit.

    This equals the number of reduced proper fractions with denominator
    <= ``limit`` (Project Euler problem 72).

    Fixes over the corrupted original: the ``__main__`` guard called
    ``solution`` while the function carried a mangled name, and the
    float-product formulation risked rounding error; this version sieves
    phi with exact integer arithmetic.
    """
    # phi[n] starts as n; for each prime p dividing n we apply
    # phi(n) -> phi(n) * (1 - 1/p), done exactly as phi[n] -= phi[n] // p.
    phi = list(range(limit + 1))
    for p in range(2, limit + 1):
        if phi[p] == p:  # p untouched so far => p is prime
            for n in range(p, limit + 1, p):
                phi[n] -= phi[n] // p
    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 189 | 0 |
'''simple docstring'''
def UpperCamelCase__(upper_limit: int) -> "list[int]":
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming.

    Raises:
        ValueError: if ``upper_limit`` is negative.

    >>> UpperCamelCase__(5)
    [1, 1, 2, 5, 14, 42]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base cases: C(0) = C(1) = 1 (the corrupted original lost these writes,
    # so every entry stayed zero).
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence: C(i) = sum(C(j) * C(i - j - 1)) for j = 0 .. i-1.
    # (The original looped j over the full limit, indexing out of range.)
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
if __name__ == "__main__":
    # Interactive driver: read upper limits until the user enters a negative
    # number or non-numeric input.
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            # BUGFIX: the corrupted original never bound N (the assignment
            # target had been mangled), making the loop a NameError.
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(F"""The Catalan numbers from 0 through {N} are:""")
                # BUGFIX: call the function actually defined above instead of
                # the undefined name ``catalan_numbers``.
                print(UpperCamelCase__(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class a:
    """Model tester for the TF Transfo-XL models.

    Builds a tiny ``TransfoXLConfig`` plus random dummy inputs and checks the
    output and memory shapes for each task head.

    NOTE(review): reconstructed from a corrupted original whose assignment
    targets were mangled; values and call order are unchanged.
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        # Total attendable length: current segment plus cached memory.
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        """Return (config, first segment ids, second segment ids, lm labels)."""
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        """Seed python and TF RNGs for deterministic model initialisation."""
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        """Base model: check hidden-state and per-layer memory shapes."""
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        # Second segment re-uses the memories produced by the first.
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        """LM head: check logits and memory shapes across chained segments."""
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common-test mixin: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class a(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF test suite for the Transfo-XL models.

    NOTE(review): reconstructed from a corrupted original — class attribute
    names, mixin bases and ``test_*`` method names had been mangled (including
    ``list_other_models_with_output_ebd``, which was referenced but never
    bound, a NameError).
    """

    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        # NOTE(review): the original referenced ``TFTransfoXLModelTester``
        # even though the tester class above carries a mangled name; the
        # reference is kept as-is so this edit stands alone.
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Only the sequence-classification head exposes output embeddings.
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class a(unittest.TestCase):
    """Slow integration test: greedy generation with the pretrained transfo-xl-wt103 checkpoint."""

    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # Prompt: the Rasputin passage used across the transfo-xl tests.
        # BUGFIX: dtype was the nonexistent ``tf.intaa`` in the corrupted
        # original; token ids are int32.
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
        # fmt: on
        # Expected ids: the prompt followed by the model's greedy continuation
        # of the same passage.
        # fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
        # fmt: on
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
"""simple docstring"""
def A__(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b``.

    Iterative Euclidean algorithm. Fixes over the corrupted original: the
    signature repeated one parameter name (a SyntaxError) and the tuple
    re-binding ``a, b = b % a, a`` had lost its targets, which would have
    looped forever.
    """
    while a != 0:
        a, b = b % a, a
    return b
def A__(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    That is, the x in [0, m) with ``(a * x) % m == 1``.

    Raises:
        ValueError: if ``a`` and ``m`` are not coprime (no inverse exists).
    """
    from math import gcd  # local import keeps this fix self-contained

    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)

    # Extended Euclidean algorithm: maintain rows (ua, ub, uc) and
    # (va, vb, vc) with the invariant ua*a + ub*m == uc (likewise for v).
    # The corrupted original had lost every tuple-unpack target here.
    ua, ub, uc = 1, 0, a
    va, vb, vc = 0, 1, m
    while vc != 0:
        q = uc // vc
        va, vb, vc, ua, ub, uc = (ua - q * va), (ub - q * vb), (uc - q * vc), va, vb, vc
    return ua % m
| 703 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials (fill these in before running; the script cannot
# authenticate with empty strings).
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Download a user's recent tweets and save them to ``new_<name>_tweets.csv``.

    The Twitter API returns at most 200 tweets per request, so the function
    pages backwards with ``max_id`` until a page comes back empty.

    NOTE(review): reconstructed from a corrupted original whose names were
    mangled — the paging loop tested the (always truthy) screen name and so
    never terminated, ``oldest`` was referenced but never bound, the auth
    handler was fed the screen name instead of the credentials, and the
    ``__main__`` guard called this function by a name that no longer existed.
    """
    # authorize twitter and initialize the API client
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        # id of the oldest tweet fetched so far, less one; passing it as
        # max_id prevents duplicates on the next page (computed inside the
        # loop so an account with zero tweets cannot raise IndexError)
        oldest = alltweets[-1].id - 1
        print(f"getting tweets before {oldest}")

        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)

        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
| 150 | 0 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCAmelCase(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation of an embedding distribution and
    normalizes / de-normalizes embeddings with them.

    NOTE(review): the obfuscated original inherited from the undefined name
    `_A`, referenced parameters under the wrong name (NameError) and declared
    duplicate parameter names in ``to`` (SyntaxError); bases are restored from
    the module's imports and parameter names made consistent.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768) -> None:
        super().__init__()
        # Statistics of the embedding distribution; loaded from checkpoints.
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        """Move/cast the statistics; returns ``self`` for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize embeddings to zero mean / unit variance."""
        return (embeds - self.mean) * 1.0 / self.std

    def unscale(self, embeds):
        """Invert :meth:`scale`."""
        return (embeds * self.std) + self.mean
| 152 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of the KarrasVe scheduler's step functions.

    NOTE(review): the obfuscated original was named ``a`` (immediately
    shadowed by the scheduler class below) and inherited from the undefined
    name ``_A``; the name is restored to ``KarrasVeOutput``, which the
    scheduler's ``step``/``step_correct`` actually reference.
    """

    # Denoised sample for the next step (x_{t-1} in the sampling loop).
    prev_sample: torch.FloatTensor
    # Derivative of the predicted original sample w.r.t. sigma.
    derivative: torch.FloatTensor
    # Predicted fully denoised sample (x_0), when computed.
    pred_original_sample: Optional[torch.FloatTensor] = None
class a(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler in the variance-expanding (VE)
    formulation of Karras et al. (2022), Algorithm 2.

    NOTE(review): the obfuscated original declared every parameter of every
    method with one shared name (a SyntaxError) and gave all methods one
    shared name (so only the last survived); parameter and method names are
    restored from the scheduler's documented public API.
    """

    # Solver order: Heun's second-order method (predict + correct).
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (populated by set_timesteps)
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Identity — this scheduler requires no input scaling."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Precompute the decreasing timestep sequence and the sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        # Geometric interpolation from sigma_max down to sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        """Explode the sample to a higher noise level ``sigma_hat`` (Alg. 2, lines 5-6).

        Returns the noisier sample and the sigma it corresponds to.
        """
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """First-order (Euler) step from ``sigma_hat`` to ``sigma_prev``."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        """Second-order (Heun) correction averaging derivatives at both endpoints."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        # Training-time noising is not supported by this sampler.
        raise NotImplementedError()
| 144 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
# NOTE(review): the obfuscated original bound all of these to one repeatedly
# rebound name; the tokenizer class below references the canonical names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/vocab.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/vocab.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/vocab.json',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/merges.txt',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/merges.txt',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/merges.txt',
        'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt',
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'
        ),
    },
    'tokenizer_file': {
        'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/tokenizer.json',
        'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/tokenizer.json',
        'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json',
        'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json',
        'roberta-base-openai-detector': (
            'https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'
        ),
        'roberta-large-openai-detector': (
            'https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'
        ),
    },
}

# Maximum sequence length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'roberta-base': 512,
    'roberta-large': 512,
    'roberta-large-mnli': 512,
    'distilroberta-base': 512,
    'roberta-base-openai-detector': 512,
    'roberta-large-openai-detector': 512,
}
class _UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) RoBERTa tokenizer backed by the `tokenizers` library.

    NOTE(review): restored from an obfuscated original whose class attributes
    and methods all shared one name and whose signatures declared duplicate
    parameter names (a SyntaxError); attribute/method names follow the
    PreTrainedTokenizerFast contract the base class relies on.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its serialized state disagrees
        # with the requested add_prefix_space.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token; logs an error and returns None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Make the mask token behave like a normal word: include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> (</s> B </s>)"""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # RoBERTa does not use token type ids: the mask is all zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 640 |
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` pence with standard UK coins
    (1, 2, 5, 10, 20, 50, 100, 200) via an unbounded-knapsack DP.

    NOTE(review): the obfuscated original iterated ``range(pence, pence + 1)``
    instead of ``range(coin, pence + 1)``, so only the last DP cell was ever
    updated and the assertion below failed; the __main__ guard also called
    the then-undefined name ``solution``.

    >>> solution(200)
    73682
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        # Forward scan lets each coin be used an unlimited number of times.
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


# Backwards-compatible alias for the obfuscated public name.
__SCREAMING_SNAKE_CASE = solution

if __name__ == "__main__":
    assert solution(200) == 73682
| 640 | 1 |
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa:
    """Pure-Python SHA-256 (FIPS 180-4); the hex digest is in ``self.hash``.

    NOTE(review): restored from an obfuscated original whose methods all
    shared one name (making ``self.preprocessing``/``final_hash``/``ror``
    unresolvable) and whose ``ror`` declared duplicate parameter names (a
    SyntaxError); ``SHAaaa`` is the name the rest of this file references.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zeros, 64-bit big-endian bit length."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block; sets ``self.hash``."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4-byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers for the message schedule
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # extend the first 16 words into the remaining 48
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Rotate a 32-bit value right by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class lowerCamelCase(unittest.TestCase):
    """Cross-checks the pure-Python SHA-256 against hashlib.

    NOTE(review): the obfuscated original referenced an undefined variable
    and the nonexistent ``hashlib.shaaaa``, and its method name was not
    discoverable by unittest; all three are fixed.
    """

    def test_match_hashes(self):
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """CLI entry point: hash a string (-s/--string) or a file's bytes (-f/--file).

    NOTE(review): the obfuscated original referenced the undefined name
    ``a_`` and was named ``__A`` while the __main__ guard called ``main``;
    names are restored so the script actually runs.
    """
    # Self-check the module's doctests before hashing.
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    hash_input = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(hash_input, "utf-8")

    print(SHAaaa(hash_input).hash)


# Backwards-compatible alias for the obfuscated public name.
__A = main

if __name__ == "__main__":
    main()
| 525 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Open the top Google result for the query given on the command line
    # (or interactively). NOTE(review): the obfuscated original collapsed
    # query/res/link into one name `A` and referenced the undefined `query`
    # and `res`; distinct locals are restored. The `bsa` / `fake_useragent`
    # imports above are third-party and must be installed.
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback layout: the href carries the target inside a ?url= query arg.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
webbrowser.open(link) | 52 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
# NOTE(review): the obfuscated original bound all of these to one repeatedly
# rebound name; the tokenizer class below references the canonical names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length the checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowercase(PreTrainedTokenizerFast):
    """Fast RetriBERT tokenizer (WordPiece; behaviorally a BertTokenizerFast).

    NOTE(review): restored from an obfuscated original whose __init__ declared
    every parameter with one shared name (a SyntaxError), inherited from the
    undefined name `__snake_case`, and gave all methods the shared name `a`;
    attribute/method names follow the PreTrainedTokenizerFast contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer when its serialized state disagrees
        # with the constructor arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a pair is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # BERT-style segment ids: 0 for the first sequence, 1 for the second.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 719 |
from __future__ import annotations
class Matrix:
    """A dense int/float matrix with basic linear-algebra operations.

    NOTE(review): restored from an obfuscated original in which every method
    was named ``a`` (so only the last survived), two signatures declared
    duplicate parameter names (a SyntaxError), and the body referenced the
    undefined class name ``Matrix``; the obfuscated class name ``lowercase``
    is kept as an alias below.
    """

    def __init__(self, rows):
        error = TypeError(
            'Matrices must be formed from a list of zero or more lists containing at '
            'least one and the same number of values, each of which must be of type '
            'int or float.'
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self):
        """Return the matrix's columns as a list of lists."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self):
        return len(self.rows)

    @property
    def num_columns(self):
        return len(self.rows[0])

    @property
    def order(self):
        """(rows, columns) tuple."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self):
        return self.order[0] == self.order[1]

    def identity(self):
        """Identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self):
        """Determinant via cofactor (Laplace) expansion; 0 for non-square."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self):
        return bool(self.determinant())

    def get_minor(self, row, column):
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row, column):
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self):
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self):
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self):
        """Transpose of the cofactor matrix."""
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self):
        determinant = self.determinant()
        if not determinant:
            raise TypeError('Only matrices with a non-zero determinant have an inverse')
        return self.adjugate() * (1 / determinant)

    def __repr__(self):
        return str(self.rows)

    def __str__(self):
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    '[' + '. '.join([str(value) for value in row]) + '.]'
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row, position=None):
        """Append (or insert at `position`) a row of matching length."""
        type_error = TypeError('Row must be a list containing all ints and/or floats')
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                'Row must be equal in length to the other rows in the matrix'
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column, position=None):
        """Append (or insert at `position`) a column of matching length."""
        type_error = TypeError(
            'Column must be a list containing all ints and/or floats'
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                'Column must be equal in length to the other columns in the matrix'
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other):
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other):
        return not self == other

    def __neg__(self):
        return self * -1

    def __add__(self, other):
        if self.order != other.order:
            raise ValueError('Addition requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other):
        if self.order != other.order:
            raise ValueError('Subtraction requires matrices of the same order')
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other):
        if isinstance(other, (int, float)):
            # NOTE: scalar products are truncated to int, as in the original.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    'The number of columns in the first matrix must '
                    'be equal to the number of rows in the second'
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                'A Matrix can only be multiplied by an int, float, or another matrix'
            )

    def __pow__(self, other):
        if not isinstance(other, int):
            raise TypeError('A Matrix can only be raised to the power of an int')
        if not self.is_square:
            raise ValueError('Only square matrices can be raised to a power')
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                'Only invertable matrices can be raised to a negative power'
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row, column):
        return sum(row[i] * column[i] for i in range(len(row)))


# Backwards-compatible alias for the obfuscated class name.
lowercase = Matrix
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 108 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and return it as a normalized
    (1, 3, image_size, image_size) tensor on ``device``.

    NOTE(review): the obfuscated original declared both parameters with one
    shared name (a SyntaxError) and passed the URL as ``stream``;
    ``load_demo_image`` is the name ``convert_blip_checkpoint`` calls.
    """
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            # CLIP normalization statistics.
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Map an original BLIP state-dict key to its Transformers equivalent.

    NOTE(review): the obfuscated original tested the undefined name ``key``
    against a differently-named parameter (a NameError); ``rename_key`` is
    the name ``convert_blip_checkpoint`` calls. Substitutions are applied in
    sequence, each condition inspecting the partially-renamed key.
    """
    if "visual_encoder" in key:
        key = re.sub('visual_encoder*', 'vision_model.encoder', key)
    if "blocks" in key:
        key = re.sub(r'blocks', 'layers', key)
    if "attn" in key:
        key = re.sub(r'attn', 'self_attn', key)
    if "norm1" in key:
        key = re.sub(r'norm1', 'layer_norm1', key)
    if "norm2" in key:
        key = re.sub(r'norm2', 'layer_norm2', key)
    if "encoder.norm" in key:
        key = re.sub(r'encoder.norm', 'post_layernorm', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r'encoder.patch_embed.proj', 'embeddings.patch_embedding', key)
    if "encoder.pos_embed" in key:
        key = re.sub(r'encoder.pos_embed', 'embeddings.position_embedding', key)
    if "encoder.cls_token" in key:
        key = re.sub(r'encoder.cls_token', 'embeddings.class_embedding', key)
    if "self_attn" in key:
        key = re.sub(r'self_attn.proj', 'self_attn.projection', key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the original BLIP captioning/VQA/ITM checkpoints to the
    Transformers format, sanity-check the outputs, and optionally save them.

    NOTE(review): the obfuscated original declared both parameters with one
    shared name (a SyntaxError) and collapsed all locals into one name;
    distinct names are restored. Downloads checkpoints over the network.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    # --- captioning model ---
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='base')
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device='cpu')
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    input_ids = tokenizer(['a picture of']).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # --- VQA model ---
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='base')
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='pt').input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa')

    # --- image-text matching model ---
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='base')
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question, return_tensors='pt', padding='max_length', truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.21106874942779541
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()

    # NOTE(review): the obfuscated original referenced the undefined names
    # `parser`/`args` and passed `args.checkpoint_path`, which no
    # add_argument call above ever defines; the call now matches the parser.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 146 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# Bug fix: all three module docstring constants were assigned to the same
# name `_UpperCamelCase`, clobbering one another, while the metric class
# below references `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION`
# (NameError at import time). The intended constant names are restored.
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''

_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''

_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
# Metric wrapper for the MATH (competition mathematics) benchmark:
# accuracy after canonicalizing LaTeX answers with
# `math_equivalence.is_equiv`.
# NOTE(review): `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION` are
# referenced here, but the module above assigns those strings to
# `_UpperCamelCase` — presumably a mechanical rename gone wrong; confirm
# the intended constant names.
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
    """Accuracy metric over canonicalized LaTeX predictions/references."""

    def UpperCamelCase__ (self ) -> List[str]:
        """Declare metric metadata: both inputs are plain strings."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' ),
                    'references': datasets.Value('string' ),
                } ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )

    # NOTE(review): both parameters are named `__a`, which is a SyntaxError
    # (duplicate argument), and the accumulator is assigned to
    # `UpperCAmelCase__` while the loop reads `n_correct`/`accuracy`. The
    # original signature (predictions, references) and locals were lost in
    # a mechanical rename and must be restored.
    def UpperCamelCase__ (self , __a , __a ) -> int:
        """Compute accuracy over canonicalized (prediction, reference) pairs."""
        UpperCAmelCase__ = 0.0
        for i, j in zip(__a , __a ):
            n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0
        UpperCAmelCase__ = n_correct / len(__a )
        return {
            "accuracy": accuracy,
        }
| 146 | 1 |
"""simple docstring"""
def __lowercase ( a : int ) -> int:
    """Return the Fibonacci number F(a) (0-indexed: F(0)=0, F(1)=1, ...).

    Non-int inputs and ``a == 1`` return 0 — this guard is kept as-is for
    backward compatibility with existing callers.
    """
    # Bug fixes: the guard read `n == 1 or not isinstance(a , a)` — `n` was
    # never bound (NameError) and `isinstance(a, a)` raises TypeError for
    # every int input (arg 2 must be a type); the intended check is
    # `isinstance(a, int)`. The list was also assigned to `__snake_case`
    # while the loop read the unbound name `sequence`.
    if a == 1 or not isinstance(a, int):
        return 0
    elif a == 2:
        return 1
    else:
        # Iteratively build F(0)..F(a); avoids recursion depth issues.
        sequence = [0, 1]
        for i in range(2, a + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[a]
def __lowercase ( a : int ) -> int:
    """Return the index of the first Fibonacci number with at least `a` digits."""
    # Bug fix: `digits` and `index` were assigned to the throwaway name
    # `__snake_case`, so the `while` condition and `index += 1` raised
    # NameError/UnboundLocalError; the original locals are restored.
    digits = 0
    index = 2
    while digits < a:
        index += 1
        # NOTE(review): `fibonacci` is not defined in this module — every
        # sibling function here was renamed to `__lowercase` by a mechanical
        # rename, so the helper's real name was lost. Restore the helper's
        # original name before running. (The original also iterated with
        # `fibonacci(index)`, not the constant argument `a`.)
        digits = len(str(fibonacci(index)))
    return index
def __lowercase ( a : int = 1_000 ) -> int:
    """Project Euler 25 entry point: index of the first Fibonacci number
    with `a` digits (default 1000)."""
    # NOTE(review): `fibonacci_digits_index` is undefined here — the helper
    # above was mechanically renamed to `__lowercase`; restore its real name.
    return fibonacci_digits_index(a )

if __name__ == "__main__":
    # NOTE(review): `solution` is likewise undefined (it was renamed to
    # `__lowercase`); this guard cannot run as written.
    print(solution(int(str(input()).strip())))
| 720 |
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowercase :
    """Builds a tiny MRA configuration plus dummy inputs and shape-checks
    the base model and every task head (masked LM, multiple choice, QA,
    sequence/token classification).

    NOTE(review): a mechanical identifier rename has broken this class —
    every method is named `_UpperCamelCase` (later defs shadow earlier
    ones in the class namespace), most parameters share the name `a`
    (duplicate argument names are a SyntaxError), locals are assigned to
    `__snake_case` while subsequent lines read the original local names,
    and the parenthesized `(__snake_case, ...) : Tuple = ...` targets are
    invalid annotated assignments. The original identifiers must be
    restored before this file can run.
    """

    # Original role: __init__ storing the tiny-model hyperparameters.
    def __init__( self : str , a : List[str] , a : Any=2 , a : int=8 , a : List[str]=True , a : Union[str, Any]=True , a : Union[str, Any]=True , a : Optional[int]=True , a : Union[str, Any]=9_9 , a : Dict=1_6 , a : Union[str, Any]=5 , a : int=2 , a : List[str]=3_6 , a : int="gelu" , a : Optional[int]=0.0 , a : Any=0.0 , a : Optional[Any]=5_1_2 , a : Tuple=1_6 , a : Dict=2 , a : Union[str, Any]=0.0_2 , a : Dict=3 , a : Union[str, Any]=4 , a : Optional[int]=None , ):
        """Store the tiny-model hyperparameters on the tester instance."""
        __snake_case : Any =parent
        __snake_case : int =batch_size
        __snake_case : Dict =seq_length
        __snake_case : Any =is_training
        __snake_case : Optional[int] =use_input_mask
        __snake_case : List[Any] =use_token_type_ids
        __snake_case : List[str] =use_labels
        __snake_case : Optional[Any] =vocab_size
        __snake_case : Optional[Any] =hidden_size
        __snake_case : Optional[Any] =num_hidden_layers
        __snake_case : Any =num_attention_heads
        __snake_case : Optional[int] =intermediate_size
        __snake_case : Dict =hidden_act
        __snake_case : List[str] =hidden_dropout_prob
        __snake_case : List[Any] =attention_probs_dropout_prob
        __snake_case : List[str] =max_position_embeddings
        __snake_case : Tuple =type_vocab_size
        __snake_case : Tuple =type_sequence_label_size
        __snake_case : int =initializer_range
        __snake_case : Any =num_labels
        __snake_case : List[str] =num_choices
        __snake_case : Union[str, Any] =scope

    # Original role: prepare_config_and_inputs.
    def _UpperCamelCase ( self : Dict ):
        """Create random input ids / masks / labels and a tiny config."""
        __snake_case : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __snake_case : int =None
        if self.use_input_mask:
            __snake_case : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
        __snake_case : Optional[Any] =None
        if self.use_token_type_ids:
            __snake_case : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __snake_case : str =None
        __snake_case : int =None
        __snake_case : int =None
        if self.use_labels:
            __snake_case : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __snake_case : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __snake_case : Optional[Any] =ids_tensor([self.batch_size] , self.num_choices )
        __snake_case : List[str] =self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Original role: get_config.
    def _UpperCamelCase ( self : List[Any] ):
        """Return a tiny MraConfig built from the stored hyperparameters."""
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )

    # Original role: get_pipeline_config (larger vocabulary variant).
    def _UpperCamelCase ( self : List[str] ):
        """Variant config with a 300-token vocabulary for pipeline tests."""
        __snake_case : int =self.get_config()
        __snake_case : Optional[Any] =3_0_0
        return config

    # Original role: prepare_config_and_inputs_for_decoder.
    def _UpperCamelCase ( self : Any ):
        """Extend the common inputs with encoder states for decoder checks."""
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : Tuple =self.prepare_config_and_inputs()
        __snake_case : Dict =True
        __snake_case : Any =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __snake_case : Dict =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    # Original role: create_and_check_model.
    def _UpperCamelCase ( self : List[str] , a : List[Any] , a : int , a : Optional[int] , a : List[Any] , a : Tuple , a : Tuple , a : List[Any] ):
        """Run the base MraModel and check the last-hidden-state shape."""
        __snake_case : List[Any] =MraModel(config=a )
        model.to(a )
        model.eval()
        __snake_case : Union[str, Any] =model(a , attention_mask=a , token_type_ids=a )
        __snake_case : Any =model(a , token_type_ids=a )
        __snake_case : int =model(a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Original role: create_and_check_model_as_decoder.
    def _UpperCamelCase ( self : str , a : Tuple , a : Any , a : int , a : Optional[int] , a : Union[str, Any] , a : int , a : Tuple , a : Optional[Any] , a : Optional[int] , ):
        """Run MraModel as a decoder with encoder states and check shapes."""
        __snake_case : Any =True
        __snake_case : Union[str, Any] =MraModel(a )
        model.to(a )
        model.eval()
        __snake_case : Union[str, Any] =model(
            a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , encoder_attention_mask=a , )
        __snake_case : List[Any] =model(
            a , attention_mask=a , token_type_ids=a , encoder_hidden_states=a , )
        __snake_case : List[str] =model(a , attention_mask=a , token_type_ids=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Original role: create_and_check_for_masked_lm.
    def _UpperCamelCase ( self : Tuple , a : Any , a : List[Any] , a : int , a : str , a : Optional[Any] , a : List[str] , a : List[str] ):
        """Check MraForMaskedLM logits shape."""
        __snake_case : Tuple =MraForMaskedLM(config=a )
        model.to(a )
        model.eval()
        __snake_case : str =model(a , attention_mask=a , token_type_ids=a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Original role: create_and_check_for_question_answering.
    def _UpperCamelCase ( self : List[str] , a : Optional[int] , a : int , a : List[str] , a : int , a : Any , a : Union[str, Any] , a : Union[str, Any] ):
        """Check MraForQuestionAnswering start/end logits shapes."""
        __snake_case : Optional[Any] =MraForQuestionAnswering(config=a )
        model.to(a )
        model.eval()
        __snake_case : Dict =model(
            a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Original role: create_and_check_for_sequence_classification.
    def _UpperCamelCase ( self : Dict , a : Dict , a : Dict , a : Optional[Any] , a : int , a : int , a : List[str] , a : Any ):
        """Check MraForSequenceClassification logits shape."""
        __snake_case : Optional[Any] =self.num_labels
        __snake_case : Optional[Any] =MraForSequenceClassification(a )
        model.to(a )
        model.eval()
        __snake_case : str =model(a , attention_mask=a , token_type_ids=a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Original role: create_and_check_for_token_classification.
    def _UpperCamelCase ( self : Optional[int] , a : Tuple , a : Any , a : Optional[int] , a : Any , a : int , a : str , a : Optional[int] ):
        """Check MraForTokenClassification logits shape."""
        __snake_case : Tuple =self.num_labels
        __snake_case : Optional[int] =MraForTokenClassification(config=a )
        model.to(a )
        model.eval()
        __snake_case : List[str] =model(a , attention_mask=a , token_type_ids=a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Original role: create_and_check_for_multiple_choice.
    def _UpperCamelCase ( self : str , a : Tuple , a : Tuple , a : Optional[Any] , a : Dict , a : Dict , a : Dict , a : str ):
        """Check MraForMultipleChoice logits shape (inputs tiled per choice)."""
        __snake_case : Union[str, Any] =self.num_choices
        __snake_case : Optional[int] =MraForMultipleChoice(config=a )
        model.to(a )
        model.eval()
        __snake_case : List[str] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case : Tuple =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case : Any =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case : int =model(
            a , attention_mask=a , token_type_ids=a , labels=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Original role: prepare_config_and_inputs_for_common.
    def _UpperCamelCase ( self : str ):
        """Return (config, inputs_dict) for the shared ModelTesterMixin tests."""
        __snake_case : Union[str, Any] =self.prepare_config_and_inputs()
        (
            (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) , (
                __snake_case
            ) ,
        ) : Optional[int] =config_and_inputs
        __snake_case : Tuple ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class _lowercase ( lowerCAmelCase , unittest.TestCase ):
    """Standard ModelTesterMixin-driven test suite for MRA.

    NOTE(review): identifier mangling applies here too — every class
    attribute is named `_a` (later assignments clobber earlier ones),
    every test method is named `_UpperCamelCase` (later defs shadow
    earlier ones), the mixin base `lowerCAmelCase` is undefined, and
    several calls reference the undefined module-level name `a`.
    The original identifiers must be restored before this can run.
    """

    # Originally `all_model_classes`; the following `_a` assignments were
    # presumably the mixin's boolean switches and pipeline tuple.
    _a : Union[str, Any] = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _a : int = False
    _a : Union[str, Any] = False
    _a : Dict = False
    _a : Dict = False
    _a : Any = ()

    def _UpperCamelCase ( self : Optional[int] ):
        """Set up the model tester and the config tester."""
        __snake_case : Optional[Any] =MraModelTester(self )
        __snake_case : List[Any] =ConfigTester(self , config_class=a , hidden_size=3_7 )

    def _UpperCamelCase ( self : List[Any] ):
        """Run the common config tests."""
        self.config_tester.run_common_tests()

    def _UpperCamelCase ( self : str ):
        """Shape-check the base model."""
        __snake_case : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    def _UpperCamelCase ( self : Any ):
        """Check the model for each position-embedding type."""
        __snake_case : List[Any] =self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __snake_case : Any =type
            self.model_tester.create_and_check_model(*a )

    def _UpperCamelCase ( self : Optional[Any] ):
        """Check the masked-LM head."""
        __snake_case : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*a )

    def _UpperCamelCase ( self : Dict ):
        """Check the multiple-choice head."""
        __snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*a )

    def _UpperCamelCase ( self : Dict ):
        """Check the question-answering head."""
        __snake_case : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*a )

    def _UpperCamelCase ( self : int ):
        """Check the sequence-classification head."""
        __snake_case : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*a )

    def _UpperCamelCase ( self : str ):
        """Check the token-classification head."""
        __snake_case : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*a )

    @slow
    def _UpperCamelCase ( self : Tuple ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __snake_case : Tuple =MraModel.from_pretrained(a )
            self.assertIsNotNone(a )

    @unittest.skip(reason='''MRA does not output attentions''' )
    def _UpperCamelCase ( self : Optional[Any] ):
        """Attention-output test is skipped: MRA returns no attentions."""
        return
@require_torch
class _lowercase ( unittest.TestCase ):
    """Slow integration tests pinning exact output slices of published MRA
    checkpoints (requires network + torch).

    NOTE(review): all three methods are named `_UpperCamelCase` (later
    defs shadow earlier ones) and locals are assigned to `__snake_case`
    while assertions read the original names (`output`, `vocab_size`) —
    damage from the same mechanical rename noted above.
    """

    @slow
    def _UpperCamelCase ( self : Dict ):
        """Base model, 512-token checkpoint: check shape + 3x3 output slice."""
        __snake_case : str =MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        __snake_case : Optional[int] =torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            __snake_case : List[str] =model(a )[0]
        __snake_case : Any =torch.Size((1, 2_5_6, 7_6_8) )
        self.assertEqual(output.shape , a )
        __snake_case : int =torch.tensor(
            [[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )

    @slow
    def _UpperCamelCase ( self : List[str] ):
        """Masked-LM head, 512-token checkpoint: shape + logits slice."""
        __snake_case : Any =MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        __snake_case : Union[str, Any] =torch.arange(2_5_6 ).unsqueeze(0 )
        with torch.no_grad():
            __snake_case : Optional[int] =model(a )[0]
        __snake_case : Union[str, Any] =5_0_2_6_5
        __snake_case : List[str] =torch.Size((1, 2_5_6, vocab_size) )
        self.assertEqual(output.shape , a )
        __snake_case : str =torch.tensor(
            [[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )

    @slow
    def _UpperCamelCase ( self : Tuple ):
        """Masked-LM head, 4096-token checkpoint: shape + logits slice."""
        __snake_case : List[Any] =MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
        __snake_case : Optional[int] =torch.arange(4_0_9_6 ).unsqueeze(0 )
        with torch.no_grad():
            __snake_case : Tuple =model(a )[0]
        __snake_case : Optional[int] =5_0_2_6_5
        __snake_case : Tuple =torch.Size((1, 4_0_9_6, vocab_size) )
        self.assertEqual(output.shape , a )
        __snake_case : List[str] =torch.tensor(
            [[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1e-4 ) )
| 497 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
# NOTE(review): both assignments below share the mangled name
# `lowerCamelCase__`, so the logger is immediately clobbered by the config
# map; originally these were presumably `logger` and
# `SEW_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before use.
lowerCamelCase__ : str = logging.get_logger(__name__)

# Map of pretrained SEW checkpoints to their hosted config.json URLs.
lowerCamelCase__ : List[Any] = {
    """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class __magic_name__ (snake_case_ ):
    """Configuration for the SEW (Squeezed and Efficient Wav2Vec) speech
    model: encoder sizes, the convolutional feature extractor, SpecAugment
    masking, and the CTC / sequence-classification heads.

    NOTE(review): mechanical renaming broke this class — the base class
    `snake_case_` is undefined (presumably `PretrainedConfig`), `__init__`
    repeats the parameter name `_a` (duplicate arguments are a
    SyntaxError), and the body assigns to `snake_case__` while reading the
    original keyword names (`hidden_size`, ...). Restore the original
    identifiers before use.
    """

    # Model-type identifier (originally `model_type`).
    __lowercase : Union[str, Any] = 'sew'

    def __init__( self:Any , _a:Union[str, Any]=32 , _a:Optional[int]=7_68 , _a:Optional[int]=12 , _a:Any=12 , _a:List[Any]=30_72 , _a:List[str]=2 , _a:int="gelu" , _a:Any=0.1 , _a:Tuple=0.1 , _a:int=0.1 , _a:int=0.0 , _a:Any=0.1 , _a:Tuple=0.1 , _a:List[Any]=0.02 , _a:List[str]=1e-5 , _a:Union[str, Any]="group" , _a:Optional[int]="gelu" , _a:Optional[Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , _a:Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _a:Optional[int]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _a:Tuple=False , _a:Any=1_28 , _a:Optional[Any]=16 , _a:str=True , _a:Dict=0.05 , _a:Tuple=10 , _a:Optional[Any]=2 , _a:str=0.0 , _a:Union[str, Any]=10 , _a:Optional[Any]=0 , _a:Union[str, Any]="mean" , _a:Tuple=False , _a:List[str]=False , _a:Optional[Any]=2_56 , _a:Any=0 , _a:Any=1 , _a:List[str]=2 , **_a:Any , ):
        """Store all SEW hyperparameters and validate the conv layer specs."""
        super().__init__(**_a , pad_token_id=_a , bos_token_id=_a , eos_token_id=_a )
        # Transformer encoder / feature-extractor hyperparameters.
        snake_case__ = hidden_size
        snake_case__ = feat_extract_norm
        snake_case__ = feat_extract_activation
        snake_case__ = list(_a )
        snake_case__ = list(_a )
        snake_case__ = list(_a )
        snake_case__ = conv_bias
        snake_case__ = num_conv_pos_embeddings
        snake_case__ = num_conv_pos_embedding_groups
        snake_case__ = len(self.conv_dim )
        snake_case__ = num_hidden_layers
        snake_case__ = intermediate_size
        snake_case__ = squeeze_factor
        snake_case__ = hidden_act
        snake_case__ = num_attention_heads
        snake_case__ = hidden_dropout
        snake_case__ = attention_dropout
        snake_case__ = activation_dropout
        snake_case__ = feat_proj_dropout
        snake_case__ = final_dropout
        snake_case__ = layerdrop
        snake_case__ = layer_norm_eps
        snake_case__ = initializer_range
        snake_case__ = vocab_size
        # The three conv spec tuples must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case__ = apply_spec_augment
        snake_case__ = mask_time_prob
        snake_case__ = mask_time_length
        snake_case__ = mask_time_min_masks
        snake_case__ = mask_feature_prob
        snake_case__ = mask_feature_length
        snake_case__ = mask_feature_min_masks
        # ctc loss
        snake_case__ = ctc_loss_reduction
        snake_case__ = ctc_zero_infinity
        # sequence classification
        snake_case__ = use_weighted_layer_sum
        snake_case__ = classifier_proj_size

    @property
    def SCREAMING_SNAKE_CASE__ ( self:Optional[int] ):
        """Total stride of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 33 |
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import structure: submodule name -> public names it exports.
#
# Bug fix: the mechanically-renamed original assigned this dict and both
# backend-specific export lists to the same throwaway name `UpperCamelCase`
# (each assignment clobbering the previous one) and then passed an
# undefined `_import_structure` to `_LazyModule`, raising NameError at
# import time. The standard transformers lazy-module pattern is restored.
_import_structure = {
    'configuration_groupvit': [
        'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'GroupViTConfig',
        'GroupViTOnnxConfig',
        'GroupViTTextConfig',
        'GroupViTVisionConfig',
    ],
}

# PyTorch modeling exports are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_groupvit'] = [
        'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GroupViTModel',
        'GroupViTPreTrainedModel',
        'GroupViTTextModel',
        'GroupViTVisionModel',
    ]

# TensorFlow modeling exports are only registered when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_groupvit'] = [
        'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFGroupViTModel',
        'TFGroupViTPreTrainedModel',
        'TFGroupViTTextModel',
        'TFGroupViTVisionModel',
    ]

if TYPE_CHECKING:
    # Static type-checkers see real imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy backend
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
# Module logger.
# NOTE(review): both assignments below share the mangled name
# `__magic_name__`, so the logger is immediately clobbered by the archive
# map; originally these were presumably `logger` and
# `DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before use.
__magic_name__ = logging.get_logger(__name__)

# Pretrained DeBERTa-v2 checkpoints -> hosted config.json URLs.
__magic_name__ = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}
class __lowerCAmelCase ( __a ):
    """Configuration for DeBERTa-v2 (model type "deberta-v2").

    NOTE(review): mechanical renaming broke this class — the base `__a`
    is undefined (presumably `PretrainedConfig`), `__init__` repeats the
    parameter name `_a` (duplicate arguments are a SyntaxError), and the
    body references both the undefined name `lowerCAmelCase_` and the
    original keyword names (`hidden_size`, `pos_att_type`, ...) that no
    longer exist. Restore the original identifiers before use.
    """

    # Model-type identifier (originally `model_type`).
    a_ = "deberta-v2"

    def __init__( self : str ,_a : str=128100 ,_a : Optional[int]=1536 ,_a : Any=24 ,_a : Any=24 ,_a : str=6144 ,_a : int="gelu" ,_a : str=0.1 ,_a : List[Any]=0.1 ,_a : List[str]=512 ,_a : Optional[Any]=0 ,_a : Union[str, Any]=0.02 ,_a : Optional[Any]=1e-7 ,_a : Optional[int]=False ,_a : Any=-1 ,_a : Any=0 ,_a : Optional[int]=True ,_a : int=None ,_a : Optional[Any]=0 ,_a : Any="gelu" ,**_a : Any ,):
        """Store encoder, relative-attention and pooler hyperparameters."""
        super().__init__(**lowerCAmelCase_ )
        A_ : int = hidden_size
        A_ : List[str] = num_hidden_layers
        A_ : Optional[int] = num_attention_heads
        A_ : Union[str, Any] = intermediate_size
        A_ : int = hidden_act
        A_ : Tuple = hidden_dropout_prob
        A_ : Dict = attention_probs_dropout_prob
        A_ : int = max_position_embeddings
        A_ : Dict = type_vocab_size
        A_ : List[str] = initializer_range
        A_ : List[Any] = relative_attention
        A_ : int = max_relative_positions
        A_ : List[str] = pad_token_id
        A_ : List[str] = position_biased_input
        # Backwards compatibility
        if type(lowerCAmelCase_ ) == str:
            A_ : Tuple = [x.strip() for x in pos_att_type.lower().split("""|""" )]
        A_ : Tuple = pos_att_type
        A_ : Tuple = vocab_size
        A_ : Optional[Any] = layer_norm_eps
        # Pooler size defaults via kwargs lookup.
        A_ : Tuple = kwargs.get("""pooler_hidden_size""" ,lowerCAmelCase_ )
        A_ : str = pooler_dropout
        A_ : int = pooler_hidden_act
class __lowerCAmelCase ( __a ):
    """ONNX export configuration for DeBERTa-v2: dynamic input axes,
    minimum opset, and dummy-input generation.

    NOTE(review): all methods are named `_a` (later defs shadow earlier
    ones in the class namespace) and `generate_dummy_inputs` repeats the
    parameter name `_a` (SyntaxError); the original identifiers were lost
    in a mechanical rename.
    """

    # Originally the `inputs` property.
    @property
    def _a ( self : Tuple ):
        """Dynamic input axes; includes token_type_ids when the config uses them."""
        if self.task == "multiple-choice":
            A_ : Tuple = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            A_ : Dict = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    # Originally `default_onnx_opset`.
    @property
    def _a ( self : List[Any] ):
        """Minimum ONNX opset version supported by this export."""
        return 12

    # Originally `generate_dummy_inputs`.
    def _a ( self : List[Any] ,_a : Tuple ,_a : Optional[int] = -1 ,_a : Optional[int] = -1 ,_a : Optional[int] = -1 ,_a : Dict = False ,_a : Tuple = None ,_a : Optional[int] = 3 ,_a : Optional[int] = 40 ,_a : Tuple = 40 ,_a : List[Any] = None ,):
        """Generate dummy inputs, dropping token_type_ids when unused."""
        A_ : Optional[int] = super().generate_dummy_inputs(preprocessor=lowerCAmelCase_ ,framework=lowerCAmelCase_ )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 718 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger.
# NOTE(review): both assignments below share the mangled name
# `__magic_name__`, so the logger is immediately clobbered by the archive
# map; originally these were presumably `logger` and
# `OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP` — confirm before use.
__magic_name__ = logging.get_logger(__name__)

# Pretrained OWL-ViT checkpoints -> hosted config.json URLs.
__magic_name__ = {
    'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
    'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
    'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Text-encoder configuration for OWL-ViT (model type "owlvit_text_model").

    NOTE(review): `__init__` repeats the parameter name `_a` (duplicate
    arguments are a SyntaxError) and the body reads the original keyword
    names (`vocab_size`, ...) that no longer exist; the base class name
    `__SCREAMING_SNAKE_CASE` is likewise an undefined mangled name
    (presumably `PretrainedConfig`). Restore original identifiers before use.
    """

    # Model-type identifier (originally `model_type`).
    a_ = """owlvit_text_model"""

    def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
        """Store text-tower hyperparameters and special-token ids."""
        super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
        A_ : Tuple = vocab_size
        A_ : int = hidden_size
        A_ : Optional[int] = intermediate_size
        A_ : Optional[int] = num_hidden_layers
        A_ : Union[str, Any] = num_attention_heads
        A_ : int = max_position_embeddings
        A_ : str = hidden_act
        A_ : Union[str, Any] = layer_norm_eps
        A_ : Tuple = attention_dropout
        A_ : Union[str, Any] = initializer_range
        A_ : List[Any] = initializer_factor

    @classmethod
    def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
        """Load a config from a checkpoint, unwrapping a nested owlvit config."""
        cls._set_token_in_kwargs(_a )
        A_ , A_ : int = cls.get_config_dict(_a ,**_a )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            A_ : Union[str, Any] = config_dict["""text_config"""]
        # Warn when instantiating from a mismatched model type.
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Vision-encoder configuration for OWL-ViT (model type
    "owlvit_vision_model").

    NOTE(review): same mechanical-rename damage as the text config —
    `__init__` repeats the parameter name `_a` (SyntaxError) and the body
    reads keyword names that no longer exist.
    """

    # Model-type identifier (originally `model_type`).
    a_ = """owlvit_vision_model"""

    def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
        """Store vision-tower hyperparameters (image/patch size, depths, ...)."""
        super().__init__(**_a )
        A_ : List[str] = hidden_size
        A_ : Union[str, Any] = intermediate_size
        A_ : Union[str, Any] = num_hidden_layers
        A_ : Optional[Any] = num_attention_heads
        A_ : int = num_channels
        A_ : str = image_size
        A_ : List[Any] = patch_size
        A_ : int = hidden_act
        A_ : List[Any] = layer_norm_eps
        A_ : List[str] = attention_dropout
        A_ : str = initializer_range
        A_ : str = initializer_factor

    @classmethod
    def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
        """Load a config from a checkpoint, unwrapping a nested owlvit config."""
        cls._set_token_in_kwargs(_a )
        A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            A_ : List[str] = config_dict["""vision_config"""]
        # Warn when instantiating from a mismatched model type.
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Top-level OWL-ViT configuration combining the text and vision
    sub-configs plus projection / logit-scale parameters.

    NOTE(review): mechanical renaming left `__init__` and the classmethods
    with duplicated `_a` parameters (SyntaxError), both class attributes
    named `a_` (the second clobbers the first), and bodies reading lost
    keyword names (`text_config`, `projection_dim`, ...). Restore the
    original identifiers before use.
    """

    # Originally `model_type` and `is_composition`.
    a_ = """owlvit"""
    a_ = True

    def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
        """Build the two sub-configs (defaulting to empty dicts) and store
        the projection dimension / logit-scale initial value."""
        super().__init__(**_a )
        if text_config is None:
            A_ : List[Any] = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
        if vision_config is None:
            A_ : Tuple = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
        A_ : Dict = OwlViTTextConfig(**_a )
        A_ : Dict = OwlViTVisionConfig(**_a )
        A_ : Any = projection_dim
        A_ : Optional[int] = logit_scale_init_value
        A_ : Optional[int] = return_dict
        A_ : Dict = 1.0

    @classmethod
    def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
        """Load a config dict from a checkpoint, warning on type mismatch."""
        cls._set_token_in_kwargs(_a )
        A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
        if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(_a ,**_a )

    @classmethod
    def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
        """Alternate constructor from separate text and vision config dicts."""
        A_ : str = {}
        A_ : int = text_config
        A_ : Union[str, Any] = vision_config
        return cls.from_dict(_a ,**_a )

    def _a ( self : Optional[int] ):
        """Serialize to a dict, expanding the nested sub-configs."""
        A_ : Dict = copy.deepcopy(self.__dict__ )
        A_ : str = self.text_config.to_dict()
        A_ : Optional[int] = self.vision_config.to_dict()
        A_ : List[Any] = self.__class__.model_type
        return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for OWL-ViT: input/output dynamic axes,
    validation tolerance, dummy-input generation, and default opset.

    NOTE(review): every method is named `_a` (later defs shadow earlier
    ones in the class namespace) and `generate_dummy_inputs` repeats its
    `_a` parameter name (SyntaxError); the original identifiers were lost
    in a mechanical rename.
    """

    # Originally the `inputs` property.
    @property
    def _a ( self : int ):
        """Dynamic axes for the text + image inputs."""
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ] )

    # Originally the `outputs` property.
    @property
    def _a ( self : str ):
        """Dynamic axes for the model outputs."""
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ] )

    # Originally `atol_for_validation`.
    @property
    def _a ( self : Optional[Any] ):
        """Absolute tolerance used when validating the ONNX export."""
        return 1e-4

    # Originally `generate_dummy_inputs`.
    def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
        """Generate dummy text and image inputs via the processor."""
        A_ : Any = super().generate_dummy_inputs(
            processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
        A_ : Any = super().generate_dummy_inputs(
            processor.image_processor ,batch_size=_a ,framework=_a )
        return {**text_input_dict, **image_input_dict}

    # Originally `default_onnx_opset`.
    @property
    def _a ( self : Optional[Any] ):
        """Default ONNX opset version for the export."""
        return 14
| 27 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.