Dataset columns:

| column | dtype | min | max |
| --- | --- | --- | --- |
| code | string (length) | 86 | 54.5k |
| code_codestyle | int64 | 0 | 371 |
| style_context | string (length) | 87 | 49.2k |
| style_context_codestyle | int64 | 0 | 349 |
| label | int64 | 0 | 1 |
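Each row below pairs a `code` snippet with a `style_context` snippet, each tagged with an integer style id, plus a binary `label`. As a minimal sketch of how a dataset with this schema could be loaded and inspected with the `datasets` library — the dataset's actual name/path is not given in this dump, so the `data_files` value is a placeholder, not the real identifier:

```python
from datasets import load_dataset

# Placeholder file name: the dump does not name the dataset, so this path is
# an assumption, not the real identifier.
ds = load_dataset("json", data_files="codestyle_pairs.jsonl", split="train")

row = ds[0]
# Integer style ids for the two snippets and the binary target.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
# The snippets themselves are stored as flattened Python source strings.
print(row["code"][:200])
```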
"""simple docstring""" def lowercase_ ( _UpperCAmelCase ): """simple docstring""" A_ : List[Any] = [int(_UpperCAmelCase ) for i in ip_va_address.split('''.''' ) if i.isdigit()] return len(_UpperCAmelCase ) == 4 and all(0 <= int(_UpperCAmelCase ) <= 254 for octet in octets ) if __name__ == "__main__": _lowerCamelCase : str = input().strip() _lowerCamelCase : Any = 'valid' if is_ip_va_address_valid(ip) else 'invalid' print(f'{ip} is a {valid_or_invalid} IP v4 address.')
code_codestyle: 167
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCamelCase : str = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class lowercase ( __UpperCAmelCase , __UpperCAmelCase): __lowerCAmelCase : List[Any] = """convnextv2""" def __init__( self : int , _lowerCamelCase : str=3 , _lowerCamelCase : str=4 , _lowerCamelCase : List[Any]=4 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]=None , _lowerCamelCase : Optional[int]="gelu" , _lowerCamelCase : Union[str, Any]=0.02 , _lowerCamelCase : List[str]=1E-12 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=2_24 , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Optional[Any]=None , **_lowerCamelCase : Optional[Any] , ): """simple docstring""" super().__init__(**_lowerCamelCase ) A_ : str = num_channels A_ : int = patch_size A_ : Union[str, Any] = num_stages A_ : Any = [96, 1_92, 3_84, 7_68] if hidden_sizes is None else hidden_sizes A_ : Any = [3, 3, 9, 3] if depths is None else depths A_ : Optional[int] = hidden_act A_ : Tuple = initializer_range A_ : int = layer_norm_eps A_ : List[Any] = drop_path_rate A_ : Union[str, Any] = image_size A_ : Any = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] A_ , A_ : Tuple = get_aligned_output_features_output_indices( out_features=_lowerCamelCase , out_indices=_lowerCamelCase , stage_names=self.stage_names )
style_context_codestyle: 167
label: 1
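For orientation, the `code` field in row 1 is an identifier-scrambled IPv4 validator. A sketch of the readable form it appears to correspond to — the names `is_ip_v4_address_valid` and `octets` and the type hints are reconstructions, not part of the dataset:

```python
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    # Keep only dotted components that are pure digits, then require exactly
    # four octets, each in the range the original snippet uses (0..254).
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)


if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_v4_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
```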
Row 2

code:

```python
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

_UpperCAmelCase = logging.get_logger(__name__)

_UpperCAmelCase = {
    """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
    """distilbert-base-uncased-distilled-squad""": (
        """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
    ),
    """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
    """distilbert-base-cased-distilled-squad""": (
        """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
    ),
    """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
    """distilbert-base-multilingual-cased""": (
        """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
    ),
    """distilbert-base-uncased-finetuned-sst-2-english""": (
        """https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
    ),
}

class UpperCAmelCase ( __A ):
    '''simple docstring'''

    lowerCamelCase_ = '''distilbert'''
    lowerCamelCase_ = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }

    def __init__( self , lowercase=3_0_5_2_2 , lowercase=5_1_2 , lowercase=False , lowercase=6 , lowercase=1_2 , lowercase=7_6_8 , lowercase=4 * 7_6_8 , lowercase=0.1 , lowercase=0.1 , lowercase="gelu" , lowercase=0.02 , lowercase=0.1 , lowercase=0.2 , lowercase=0 , **lowercase , ):
        """simple docstring"""
        A_ : str = vocab_size
        A_ : Tuple = max_position_embeddings
        A_ : Dict = sinusoidal_pos_embds
        A_ : Tuple = n_layers
        A_ : List[Any] = n_heads
        A_ : Any = dim
        A_ : Union[str, Any] = hidden_dim
        A_ : Dict = dropout
        A_ : List[str] = attention_dropout
        A_ : List[Any] = activation
        A_ : str = initializer_range
        A_ : Optional[Any] = qa_dropout
        A_ : List[str] = seq_classif_dropout
        super().__init__(**lowercase , pad_token_id=lowercase )

class UpperCAmelCase ( __A ):
    '''simple docstring'''

    @property
    def lowerCAmelCase_ ( self ):
        """simple docstring"""
        if self.task == "multiple-choice":
            A_ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            A_ : Optional[int] = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
```
code_codestyle: 350
style_context:

```python
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core

# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

_UpperCAmelCase = """python tqdm regex requests packaging filelock numpy tokenizers""".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("""dataclasses""")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("""importlib_metadata""")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")

def UpperCamelCase ( __lowercase : str ,__lowercase : Dict=None ):
    '''simple docstring'''
    require_version(deps[pkg] ,__lowercase )
```
style_context_codestyle: 192
label: 0
Row 3

code:

```python
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel

def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[int]=False ):
    """simple docstring"""
    __a = OmegaConf.load(_SCREAMING_SNAKE_CASE )
    if display:
        print(yaml.dump(OmegaConf.to_container(_SCREAMING_SNAKE_CASE ) ) )
    return config

def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : Tuple=None ):
    """simple docstring"""
    if conf_path is None:
        __a = '''./model_checkpoints/vqgan_only.yaml'''
    __a = load_config(_SCREAMING_SNAKE_CASE , display=_SCREAMING_SNAKE_CASE )
    __a = VQModel(**config.model.params )
    if ckpt_path is None:
        __a = '''./model_checkpoints/vqgan_only.pt'''
    __a = torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE )
    if ".ckpt" in ckpt_path:
        __a = sd['''state_dict''']
    model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE )
    model.to(_SCREAMING_SNAKE_CASE )
    del sd
    return model

def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ):
    """simple docstring"""
    __a = model.encode(_SCREAMING_SNAKE_CASE )
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    __a = model.decode(_SCREAMING_SNAKE_CASE )
    return xrec

def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Any]=False ):
    """simple docstring"""
    __a = string.rsplit(""".""" , 1 )
    if reload:
        __a = importlib.import_module(_SCREAMING_SNAKE_CASE )
        importlib.reload(_SCREAMING_SNAKE_CASE )
    return getattr(importlib.import_module(_SCREAMING_SNAKE_CASE , package=_SCREAMING_SNAKE_CASE ) , cls )

def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""" )
    return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )

def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Any=True ):
    """simple docstring"""
    __a = instantiate_from_config(_SCREAMING_SNAKE_CASE )
    if sd is not None:
        model.load_state_dict(_SCREAMING_SNAKE_CASE )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}

def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
    """simple docstring"""
    if ckpt:
        __a = torch.load(_SCREAMING_SNAKE_CASE , map_location="""cpu""" )
        __a = pl_sd['''global_step''']
        print(f"loaded model from global step {global_step}." )
    else:
        __a = {'''state_dict''': None}
        __a = None
    __a = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=_SCREAMING_SNAKE_CASE , eval_mode=_SCREAMING_SNAKE_CASE )['''model''']
    return model, global_step
```
code_codestyle: 302
style_context:

```python
import unittest

import numpy as np
import torch
from torch import nn
from transformers import (
    CLIPImageProcessor,
    CLIPTextConfig,
    CLIPTextModelWithProjection,
    CLIPTokenizer,
    CLIPVisionConfig,
    CLIPVisionModelWithProjection,
)

from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps

from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()

class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
    """simple docstring"""

    lowerCAmelCase_ = KandinskyVaaPriorPipeline
    lowerCAmelCase_ = ['''prompt''']
    lowerCAmelCase_ = ['''prompt''', '''negative_prompt''']
    lowerCAmelCase_ = [
        '''num_images_per_prompt''',
        '''generator''',
        '''num_inference_steps''',
        '''latents''',
        '''negative_prompt''',
        '''guidance_scale''',
        '''output_type''',
        '''return_dict''',
    ]
    lowerCAmelCase_ = False

    @property
    def UpperCAmelCase__ ( self : int ):
        """simple docstring"""
        return 32

    @property
    def UpperCAmelCase__ ( self : List[str] ):
        """simple docstring"""
        return 32

    @property
    def UpperCAmelCase__ ( self : Dict ):
        """simple docstring"""
        return self.time_input_dim

    @property
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """simple docstring"""
        return self.time_input_dim * 4

    @property
    def UpperCAmelCase__ ( self : List[Any] ):
        """simple docstring"""
        return 100

    @property
    def UpperCAmelCase__ ( self : str ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        return tokenizer

    @property
    def UpperCAmelCase__ ( self : List[Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : List[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_A )

    @property
    def UpperCAmelCase__ ( self : str ):
        """simple docstring"""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Dict = {
            '''num_attention_heads''': 2,
            '''attention_head_dim''': 12,
            '''embedding_dim''': self.text_embedder_hidden_size,
            '''num_layers''': 1,
        }
        __SCREAMING_SNAKE_CASE : Optional[Any] = PriorTransformer(**_A )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        """simple docstring"""
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : int = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection(_A )
        return model

    @property
    def UpperCAmelCase__ ( self : Optional[int] ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Dict = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_A , do_normalize=_A , do_resize=_A , image_mean=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , image_std=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , resample=3 , size=224 , )
        return image_processor

    def UpperCAmelCase__ ( self : Tuple ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_prior
        __SCREAMING_SNAKE_CASE : str = self.dummy_image_encoder
        __SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_text_encoder
        __SCREAMING_SNAKE_CASE : List[Any] = self.dummy_tokenizer
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_image_processor
        __SCREAMING_SNAKE_CASE : str = UnCLIPScheduler(
            variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1000 , clip_sample=_A , clip_sample_range=10.0 , )
        __SCREAMING_SNAKE_CASE : int = {
            '''prior''': prior,
            '''image_encoder''': image_encoder,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''scheduler''': scheduler,
            '''image_processor''': image_processor,
        }
        return components

    def UpperCAmelCase__ ( self : Union[str, Any] , _A : int , _A : Dict=0 ):
        """simple docstring"""
        if str(_A ).startswith('''mps''' ):
            __SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(_A )
        else:
            __SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A ).manual_seed(_A )
        __SCREAMING_SNAKE_CASE : List[str] = {
            '''prompt''': '''horse''',
            '''generator''': generator,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs

    def UpperCAmelCase__ ( self : Any ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : str = '''cpu'''
        __SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE : Any = self.pipeline_class(**_A )
        __SCREAMING_SNAKE_CASE : List[Any] = pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        __SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(_A ) )
        __SCREAMING_SNAKE_CASE : Tuple = output.image_embeds
        __SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
            **self.get_dummy_inputs(_A ) , return_dict=_A , )[0]
        __SCREAMING_SNAKE_CASE : Tuple = image[0, -10:]
        __SCREAMING_SNAKE_CASE : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __SCREAMING_SNAKE_CASE : List[str] = np.array(
            [-0.05_32, 1.71_20, 0.36_56, -1.08_52, -0.89_46, -1.17_56, 0.43_48, 0.24_82, 0.51_46, -0.11_56] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def UpperCAmelCase__ ( self : List[str] ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Optional[int] = torch_device == '''cpu'''
        __SCREAMING_SNAKE_CASE : Any = True
        __SCREAMING_SNAKE_CASE : int = False
        self._test_inference_batch_single_identical(
            test_max_difference=_A , relax_max_difference=_A , test_mean_pixel_difference=_A , )

    @skip_mps
    def UpperCAmelCase__ ( self : Optional[int] ):
        """simple docstring"""
        __SCREAMING_SNAKE_CASE : Dict = torch_device == '''cpu'''
        __SCREAMING_SNAKE_CASE : List[Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_A , test_mean_pixel_difference=_A , )
```
style_context_codestyle: 303
label: 0
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def A ( snake_case :List[Any] , snake_case :Dict=1 ) -> Optional[int]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def A ( snake_case :Dict , snake_case :int=0 ) -> Optional[int]: __UpperCamelCase = [] for old_item in old_list: __UpperCamelCase = old_item.replace('in_layers.0' , 'norm1' ) __UpperCamelCase = new_item.replace('in_layers.2' , 'conv1' ) __UpperCamelCase = new_item.replace('out_layers.0' , 'norm2' ) __UpperCamelCase = new_item.replace('out_layers.3' , 'conv2' ) __UpperCamelCase = new_item.replace('emb_layers.1' , 'time_emb_proj' ) __UpperCamelCase = new_item.replace('skip_connection' , 'conv_shortcut' ) __UpperCamelCase = shave_segments(snake_case , n_shave_prefix_segments=snake_case ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case :Optional[Any] , snake_case :Tuple=0 ) -> Tuple: __UpperCamelCase = [] for old_item in old_list: __UpperCamelCase = old_item __UpperCamelCase = new_item.replace('norm.weight' , 'group_norm.weight' ) __UpperCamelCase = new_item.replace('norm.bias' , 'group_norm.bias' ) __UpperCamelCase = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) __UpperCamelCase = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) __UpperCamelCase = shave_segments(snake_case , n_shave_prefix_segments=snake_case ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def A ( snake_case :int , snake_case :List[str] , snake_case :List[str] , snake_case :Any=None , snake_case :Optional[int]=None , snake_case :Union[str, Any]=None ) -> Optional[int]: assert isinstance(snake_case , snake_case ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): __UpperCamelCase = old_checkpoint[path] __UpperCamelCase = old_tensor.shape[0] // 3 __UpperCamelCase = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) __UpperCamelCase = old_tensor.shape[0] // config['num_head_channels'] // 3 __UpperCamelCase = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = old_tensor.split(channels // num_heads , dim=1 ) __UpperCamelCase = query.reshape(snake_case ) __UpperCamelCase = key.reshape(snake_case ) __UpperCamelCase = value.reshape(snake_case ) for path in paths: __UpperCamelCase = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here __UpperCamelCase = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) __UpperCamelCase = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) __UpperCamelCase = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: __UpperCamelCase = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: __UpperCamelCase = old_checkpoint[path['old']][:, :, 0] else: __UpperCamelCase = old_checkpoint[path['old']] def A ( snake_case :Optional[Any] , snake_case :Dict ) -> Optional[Any]: __UpperCamelCase = {} __UpperCamelCase = checkpoint['time_embed.0.weight'] __UpperCamelCase = checkpoint['time_embed.0.bias'] __UpperCamelCase = checkpoint['time_embed.2.weight'] __UpperCamelCase = checkpoint['time_embed.2.bias'] __UpperCamelCase = checkpoint['input_blocks.0.0.weight'] __UpperCamelCase = checkpoint['input_blocks.0.0.bias'] __UpperCamelCase = checkpoint['out.0.weight'] __UpperCamelCase = checkpoint['out.0.bias'] __UpperCamelCase = checkpoint['out.2.weight'] __UpperCamelCase = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only __UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(snake_case ) } # Retrieves the keys for the middle blocks only __UpperCamelCase = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(snake_case ) } # Retrieves the keys for the output blocks only __UpperCamelCase = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) __UpperCamelCase = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(snake_case ) } for i in range(1 , snake_case ): __UpperCamelCase = (i - 1) // (config['num_res_blocks'] + 1) __UpperCamelCase = (i - 1) % (config['num_res_blocks'] + 1) __UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] __UpperCamelCase = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: __UpperCamelCase = checkpoint[ f'input_blocks.{i}.0.op.weight' ] __UpperCamelCase = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} __UpperCamelCase = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path, resnet_op] , config=snake_case ) if len(snake_case ): __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } __UpperCamelCase = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=snake_case , config=snake_case , ) __UpperCamelCase = middle_blocks[0] __UpperCamelCase = middle_blocks[1] __UpperCamelCase = middle_blocks[2] __UpperCamelCase = renew_resnet_paths(snake_case ) assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case ) __UpperCamelCase = renew_resnet_paths(snake_case ) assign_to_checkpoint(snake_case , snake_case , snake_case , config=snake_case ) __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , attention_paths_to_split=snake_case , config=snake_case ) for i in range(snake_case ): __UpperCamelCase = i // (config['num_res_blocks'] + 1) __UpperCamelCase = i % (config['num_res_blocks'] + 1) __UpperCamelCase = [shave_segments(snake_case , 2 ) for name in output_blocks[i]] __UpperCamelCase = {} for layer in output_block_layers: __UpperCamelCase , __UpperCamelCase = layer.split('.' 
)[0], shave_segments(snake_case , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(snake_case ) else: __UpperCamelCase = [layer_name] if len(snake_case ) > 1: __UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] __UpperCamelCase = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = renew_resnet_paths(snake_case ) __UpperCamelCase = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) if ["conv.weight", "conv.bias"] in output_block_list.values(): __UpperCamelCase = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) __UpperCamelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] __UpperCamelCase = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(snake_case ) == 2: __UpperCamelCase = [] if len(snake_case ): __UpperCamelCase = renew_attention_paths(snake_case ) __UpperCamelCase = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } __UpperCamelCase = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( snake_case , snake_case , snake_case , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=snake_case , ) else: __UpperCamelCase = renew_resnet_paths(snake_case , n_shave_prefix_segments=1 ) for path in resnet_0_paths: __UpperCamelCase = '.'.join(['output_blocks', str(snake_case ), path['old']] ) __UpperCamelCase = '.'.join(['up_blocks', str(snake_case ), 'resnets', str(snake_case ), path['new']] ) __UpperCamelCase = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": UpperCamelCase : Any = argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert." 
) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") UpperCamelCase : Dict = parser.parse_args() UpperCamelCase : Optional[Any] = torch.load(args.checkpoint_path) with open(args.config_file) as f: UpperCamelCase : int = json.loads(f.read()) UpperCamelCase : Dict = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] UpperCamelCase : Optional[Any] = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: UpperCamelCase : List[Any] = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase : Optional[int] = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1])) UpperCamelCase : Dict = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
code_codestyle: 351
"""simple docstring""" from math import isqrt def A ( snake_case :int ) -> list[int]: __UpperCamelCase = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , snake_case , snake_case ): __UpperCamelCase = False return [i for i in range(2 , snake_case ) if is_prime[i]] def A ( snake_case :int = 1_0**8 ) -> int: __UpperCamelCase = calculate_prime_numbers(max_number // 2 ) __UpperCamelCase = 0 __UpperCamelCase = 0 __UpperCamelCase = len(snake_case ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f'''{solution() = }''')
style_context_codestyle: 263
label: 0
Row 5

code:

```python
import datasets

__A = "\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n"

__A = "\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n"

__A = "\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n 'accuracy': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n"

def lowerCAmelCase_ ( __a , __a ) -> List[str]:
    """simple docstring"""
    return (preds == labels).mean()

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
    '''simple docstring'''

    def SCREAMING_SNAKE_CASE_ (self : int) ->Union[str, Any]:
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }) , codebase_urls=[] , reference_urls=[] , format="numpy" , )

    def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]) ->Union[str, Any]:
        '''simple docstring'''
        return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_)}
```
code_codestyle: 10
style_context:

```python
'''simple docstring'''

from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching

def _UpperCamelCase ( ):
    '''simple docstring'''
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    UpperCAmelCase__ = """__test_patch_submodule_mock__"""
    with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os , _PatchedModuleObj )
        assert isinstance(_test_patching.os.path , _PatchedModuleObj )
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path , _PatchedModuleObj )
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
        assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join
    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

def _UpperCamelCase ( ):
    '''simple docstring'''
    assert _test_patching.open is open
    UpperCAmelCase__ = """__test_patch_submodule_builtin_mock__"""
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ ):
        assert _test_patching.open is mock
    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open

def _UpperCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase__ = """__test_patch_submodule_missing_mock__"""
    with patch_submodule(_test_patching , """pandas.read_csv""" , SCREAMING_SNAKE_CASE__ ):
        pass

def _UpperCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase__ = """__test_patch_submodule_missing_builtin_mock__"""
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ) is None
    with patch_submodule(_test_patching , """len""" , SCREAMING_SNAKE_CASE__ ):
        assert _test_patching.len is mock
    assert _test_patching.len is len

def _UpperCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase__ = """__test_patch_submodule_start_and_stop_mock__"""
    UpperCAmelCase__ = patch_submodule(_test_patching , """open""" , SCREAMING_SNAKE_CASE__ )
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open

def _UpperCamelCase ( ):
    '''simple docstring'''
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    UpperCAmelCase__ = """__test_patch_submodule_successive_join__"""
    UpperCAmelCase__ = """__test_patch_submodule_successive_dirname__"""
    UpperCAmelCase__ = """__test_patch_submodule_successive_rename__"""
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
        with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
            with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching , """os.rename""" , SCREAMING_SNAKE_CASE__ ):
        with patch_submodule(_test_patching , """os.path.join""" , SCREAMING_SNAKE_CASE__ ):
            with patch_submodule(_test_patching , """os.path.dirname""" , SCREAMING_SNAKE_CASE__ ):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

def _UpperCamelCase ( ):
    '''simple docstring'''
    UpperCAmelCase__ = """__test_patch_submodule_doesnt_exist_mock__"""
    with patch_submodule(_test_patching , """__module_that_doesn_exist__.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ):
        pass
    with patch_submodule(_test_patching , """os.__attribute_that_doesn_exist__""" , SCREAMING_SNAKE_CASE__ ):
        pass
```
style_context_codestyle: 346
label: 0
Row 6

code:

```python
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict

import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info

from transformers import (
    AdamW,
    AutoConfig,
    AutoModel,
    AutoModelForPreTraining,
    AutoModelForQuestionAnswering,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoModelWithLMHead,
    AutoTokenizer,
    PretrainedConfig,
    PreTrainedTokenizer,
)
from transformers.optimization import (
    Adafactor,
    get_cosine_schedule_with_warmup,
    get_cosine_with_hard_restarts_schedule_with_warmup,
    get_linear_schedule_with_warmup,
    get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version

A : int = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

A : Union[str, Any] = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeqaSeqLM,
    "translation": AutoModelForSeqaSeqLM,
}

# update this and the import above to support new schedulers from transformers.optimization
A : Tuple = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,             # not supported for now
    # '': get_constant_schedule_with_warmup, # not supported for now
}
A : Optional[int] = sorted(arg_to_scheduler.keys())
A : str = "{" + ", ".join(arg_to_scheduler_choices) + "}"

class lowerCamelCase (pl.LightningModule ):
    """simple docstring"""

    def __init__( self : int , __magic_name__ : argparse.Namespace , __magic_name__ : List[Any]=None , __magic_name__ : Union[str, Any]="base" , __magic_name__ : List[str]=None , __magic_name__ : List[Any]=None , __magic_name__ : str=None , **__magic_name__ : Union[str, Any] , ) -> int:
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(__magic_name__ )
        SCREAMING_SNAKE_CASE_ = 0
        SCREAMING_SNAKE_CASE_ = Path(self.hparams.output_dir )
        SCREAMING_SNAKE_CASE_ = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=__magic_name__ , **__magic_name__ , )
        else:
            SCREAMING_SNAKE_CASE_ = config
        SCREAMING_SNAKE_CASE_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams , __magic_name__ , __magic_name__ ):
                assert hasattr(self.config , __magic_name__ ), F'''model config doesn\'t have a `{p}` attribute'''
                setattr(self.config , __magic_name__ , getattr(self.hparams , __magic_name__ ) )
        if tokenizer is None:
            SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__magic_name__ , )
        else:
            SCREAMING_SNAKE_CASE_ = tokenizer
        SCREAMING_SNAKE_CASE_ = MODEL_MODES[mode]
        if model is None:
            SCREAMING_SNAKE_CASE_ = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__magic_name__ , )
        else:
            SCREAMING_SNAKE_CASE_ = model

    def __A ( self : int , *__magic_name__ : List[Any] , **__magic_name__ : Tuple ) -> Optional[Any]:
        SCREAMING_SNAKE_CASE_ = self.model_type.from_pretrained(*__magic_name__ , **__magic_name__ )

    def __A ( self : Optional[Any] ) -> Dict:
        SCREAMING_SNAKE_CASE_ = arg_to_scheduler[self.hparams.lr_scheduler]
        SCREAMING_SNAKE_CASE_ = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        SCREAMING_SNAKE_CASE_ = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def __A ( self : List[Any] ) -> List[Any]:
        SCREAMING_SNAKE_CASE_ = self.model
        SCREAMING_SNAKE_CASE_ = ["bias", "LayerNorm.weight"]
        SCREAMING_SNAKE_CASE_ = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            SCREAMING_SNAKE_CASE_ = Adafactor(
                __magic_name__ , lr=self.hparams.learning_rate , scale_parameter=__magic_name__ , relative_step=__magic_name__ )
        else:
            SCREAMING_SNAKE_CASE_ = AdamW(
                __magic_name__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        SCREAMING_SNAKE_CASE_ = optimizer
        SCREAMING_SNAKE_CASE_ = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def __A ( self : int , __magic_name__ : List[Any] , __magic_name__ : Any ) -> Optional[int]:
        return self.validation_step(__magic_name__ , __magic_name__ )

    def __A ( self : Dict , __magic_name__ : Any ) -> List[Any]:
        return self.validation_end(__magic_name__ )

    def __A ( self : Dict ) -> int:
        SCREAMING_SNAKE_CASE_ = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        SCREAMING_SNAKE_CASE_ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def __A ( self : Dict , __magic_name__ : str ) -> Union[str, Any]:
        if stage == "test":
            SCREAMING_SNAKE_CASE_ = len(self.test_dataloader().dataset )
        else:
            SCREAMING_SNAKE_CASE_ = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=__magic_name__ )
            SCREAMING_SNAKE_CASE_ = len(self.train_dataloader().dataset )

    def __A ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : int , __magic_name__ : bool = False ) -> Dict:
        raise NotImplementedError("You must implement this for your task" )

    def __A ( self : Tuple ) -> List[str]:
        return self.train_loader

    def __A ( self : Optional[int] ) -> Optional[int]:
        return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=__magic_name__ )

    def __A ( self : str ) -> Union[str, Any]:
        return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=__magic_name__ )

    def __A ( self : Optional[int] , __magic_name__ : Tuple ) -> Tuple:
        return os.path.join(
            self.hparams.data_dir ,
            "cached_{}_{}_{}".format(
                __magic_name__ ,
                list(filter(__magic_name__ , self.hparams.model_name_or_path.split("/" ) ) ).pop() ,
                str(self.hparams.max_seq_length ) ,
            ) ,
        )

    @pl.utilities.rank_zero_only
    def __A ( self : Optional[Any] , __magic_name__ : Dict[str, Any] ) -> None:
        SCREAMING_SNAKE_CASE_ = self.output_dir.joinpath("best_tfmr" )
        SCREAMING_SNAKE_CASE_ = self.step_count
        self.model.save_pretrained(__magic_name__ )
        self.tokenizer.save_pretrained(__magic_name__ )

    @staticmethod
    def __A ( __magic_name__ : Optional[int] , __magic_name__ : Tuple ) -> Union[str, Any]:
        parser.add_argument(
            "--model_name_or_path" , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
        parser.add_argument(
            "--config_name" , default="" , type=__magic_name__ , help="Pretrained config name or path if not the same as model_name" )
        parser.add_argument(
            "--tokenizer_name" , default=__magic_name__ , type=__magic_name__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
        parser.add_argument(
            "--cache_dir" , default=str(Path(__magic_name__ ).parent / "test_run" / "cache" ) , type=__magic_name__ , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
        parser.add_argument(
            "--encoder_layerdrop" , type=__magic_name__ , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
        parser.add_argument(
            "--decoder_layerdrop" , type=__magic_name__ , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
        parser.add_argument(
            "--dropout" , type=__magic_name__ , help="Dropout probability (Optional). Goes into model.config" , )
        parser.add_argument(
            "--attention_dropout" , type=__magic_name__ , help="Attention dropout probability (Optional). Goes into model.config" , )
        parser.add_argument("--learning_rate" , default=5e-5 , type=__magic_name__ , help="The initial learning rate for Adam." )
        parser.add_argument(
            "--lr_scheduler" , default="linear" , choices=__magic_name__ , metavar=__magic_name__ , type=__magic_name__ , help="Learning rate scheduler" , )
        parser.add_argument("--weight_decay" , default=0.0 , type=__magic_name__ , help="Weight decay if we apply some." )
        parser.add_argument("--adam_epsilon" , default=1e-8 , type=__magic_name__ , help="Epsilon for Adam optimizer." )
        parser.add_argument("--warmup_steps" , default=0 , type=__magic_name__ , help="Linear warmup over warmup_steps." )
        parser.add_argument("--num_workers" , default=4 , type=__magic_name__ , help="kwarg passed to DataLoader" )
        parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=__magic_name__ )
        parser.add_argument("--train_batch_size" , default=32 , type=__magic_name__ )
        parser.add_argument("--eval_batch_size" , default=32 , type=__magic_name__ )
        parser.add_argument("--adafactor" , action="store_true" )

class lowerCamelCase (pl.Callback ):
    """simple docstring"""

    def __A ( self : str , __magic_name__ : Dict , __magic_name__ : Optional[int] ) -> Dict:
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.

class lowerCamelCase (pl.Callback ):
    """simple docstring"""

    def __A ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] ) -> int:
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(__magic_name__ )

class lowerCamelCase (pl.Callback ):
    """simple docstring"""

    def __A ( self : List[str] , __magic_name__ : Dict , __magic_name__ : Tuple ) -> Union[str, Any]:
        SCREAMING_SNAKE_CASE_ = trainer.lr_schedulers[0]["scheduler"]
        SCREAMING_SNAKE_CASE_ = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(__magic_name__ )

    def __A ( self : Tuple , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule ) -> Union[str, Any]:
        rank_zero_info("***** Validation results *****" )
        SCREAMING_SNAKE_CASE_ = trainer.callback_metrics
        # Log results
        for key in sorted(__magic_name__ ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(__magic_name__ , str(metrics[key] ) ) )

    def __A ( self : Optional[Any] , __magic_name__ : pl.Trainer , __magic_name__ : pl.LightningModule ) -> str:
        rank_zero_info("***** Test results *****" )
        SCREAMING_SNAKE_CASE_ = trainer.callback_metrics
        # Log and save results to file
        SCREAMING_SNAKE_CASE_ = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
        with open(__magic_name__ , "w" ) as writer:
            for key in sorted(__magic_name__ ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(__magic_name__ , str(metrics[key] ) ) )
                    writer.write("{} = {}\n".format(__magic_name__ , str(metrics[key] ) ) )

def a__ ( __UpperCamelCase , __UpperCamelCase ):
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir" , default=str(Path(__UpperCamelCase ).parent / "test_run" / "model_checkpoints" ) , type=__UpperCamelCase , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=__UpperCamelCase , default="O2" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=__UpperCamelCase )
    parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=__UpperCamelCase , help="Max gradient norm" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
    parser.add_argument(
        "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=__UpperCamelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--seed" , type=__UpperCamelCase , default=4_2 , help="random seed for initialization" )
    parser.add_argument(
        "--data_dir" , default=str(Path(__UpperCamelCase ).parent / "test_run" / "dummy-train-data" ) , type=__UpperCamelCase , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )

def a__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=[] , __UpperCamelCase=None , __UpperCamelCase=None , **__UpperCamelCase , ):
    pl.seed_everything(args.seed )
    # init model
    SCREAMING_SNAKE_CASE_ = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=__UpperCamelCase )
    # add custom checkpoints
    if checkpoint_callback is None:
        SCREAMING_SNAKE_CASE_ = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(__UpperCamelCase )
    if logging_callback is None:
        SCREAMING_SNAKE_CASE_ = LoggingCallback()
    SCREAMING_SNAKE_CASE_ = {}
    if args.fpaa:
        SCREAMING_SNAKE_CASE_ = 1_6
    if args.gpus > 1:
        SCREAMING_SNAKE_CASE_ = "auto"
        SCREAMING_SNAKE_CASE_ = "ddp"
    SCREAMING_SNAKE_CASE_ = args.accumulate_grad_batches
    SCREAMING_SNAKE_CASE_ = None
    SCREAMING_SNAKE_CASE_ = "auto"
    SCREAMING_SNAKE_CASE_ = pl.Trainer.from_argparse_args(
        __UpperCamelCase , weights_summary=__UpperCamelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=__UpperCamelCase , val_check_interval=1 , num_sanity_val_steps=2 , **__UpperCamelCase , )
    if args.do_train:
        trainer.fit(__UpperCamelCase )
    else:
        print("RAG modeling tests with new set functions successfuly executed!" )
    return trainer
```
code_codestyle: 305
style_context:

```python
from __future__ import annotations

import collections
import pprint
from pathlib import Path

def a__ ( __UpperCamelCase ):
    return "".join(sorted(__UpperCamelCase ) )

def a__ ( __UpperCamelCase ):
    return word_by_signature[signature(__UpperCamelCase )]

A : str = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
A : int = sorted({word.strip().lower() for word in data.splitlines()})
A : Tuple = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    A : Union[str, Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
```
style_context_codestyle: 305
label: 1
Row 7

code:

```python
'''simple docstring'''

import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow

class lowerCAmelCase_:
    '''simple docstring'''

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[Any]:
        return None

class lowerCAmelCase_:
    '''simple docstring'''

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
        return None

class lowerCAmelCase_( unittest.TestCase ):
    '''simple docstring'''

    __lowercase : Dict = [
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def UpperCAmelCase_ ( self ) -> int:
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        from transformers import BertModel

        lowerCAmelCase__ : Optional[int] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(__UpperCAmelCase ) )
            vocab_file.flush()
            lowerCAmelCase__ : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCAmelCase__ : Tuple = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) )
            model.save_pretrained(__UpperCAmelCase )
            self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,__UpperCAmelCase )

    @require_tf
    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ : Dict = self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = quantize(Path(__UpperCAmelCase ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ : Any = self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase )
            lowerCAmelCase__ : Dict = quantize(__UpperCAmelCase )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[Any]:
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
            return path
        except Exception as e:
            self.fail(__UpperCAmelCase )

    @require_torch
    @require_tokenizers
    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        from transformers import BertModel

        lowerCAmelCase__ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCAmelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""pt""" )

    @require_tf
    @require_tokenizers
    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        from transformers import TFBertModel

        lowerCAmelCase__ : int = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""tf""" )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
        lowerCAmelCase__ : Any = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase )
        lowerCAmelCase__ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase )
        # Assert all variables are present
        self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase )
        self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} )
        self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        lowerCAmelCase__ : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__UpperCAmelCase ) ,3 )
        # Should have exactly the same input names
        self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__UpperCAmelCase ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCAmelCase__ , lowerCAmelCase__ : int = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__UpperCAmelCase ) ,1 )
        self.assertEqual(len(__UpperCAmelCase ) ,1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] ,"""input_ids""" )

    def UpperCAmelCase_ ( self ) -> Tuple:
        lowerCAmelCase__ : Dict = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
```
37
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Return True if the sentence contains every letter of the English alphabet."""
    frequency = set()
    input_str = input_str.replace(" ", "")  # drop whitespace before scanning
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Same check via a 26-slot flag list indexed by letter position."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Same check via a set comprehension over the alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
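A quick sanity sketch exercising the three variants above; both test strings are ordinary literals, nothing is assumed beyond the functions as defined:

for check in (is_pangram, is_pangram_faster, is_pangram_fastest):
    assert check("The quick brown fox jumps over the lazy dog")
    assert not check("My name is Unknown")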
37
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class __lowerCamelCase ( a__ ): '''simple docstring''' A_ : int = 'switch_transformers' A_ : Tuple = ['past_key_values'] A_ : Dict = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , __UpperCAmelCase=32128 , __UpperCAmelCase=768 , __UpperCAmelCase=64 , __UpperCAmelCase=2048 , __UpperCAmelCase=64 , __UpperCAmelCase=12 , __UpperCAmelCase=3 , __UpperCAmelCase=12 , __UpperCAmelCase=3 , __UpperCAmelCase=12 , __UpperCAmelCase=8 , __UpperCAmelCase=False , __UpperCAmelCase=0.01 , __UpperCAmelCase="float32" , __UpperCAmelCase=False , __UpperCAmelCase=32 , __UpperCAmelCase=128 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-6 , __UpperCAmelCase=0.001 , __UpperCAmelCase=0.001 , __UpperCAmelCase=1.0 , __UpperCAmelCase="relu" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0 , __UpperCAmelCase=1 , **__UpperCAmelCase , ) -> List[Any]: _a = vocab_size _a = d_model _a = d_kv _a = d_ff _a = num_sparse_encoder_layers _a = num_layers _a = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry _a = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: _a = self.num_layers // self.num_sparse_encoder_layers else: _a = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: _a = self.num_decoder_layers // self.num_sparse_decoder_layers else: _a = self.num_decoder_layers # HACK: this will create 0 sparse layers _a = num_heads _a = num_experts _a = expert_capacity _a = router_bias _a = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' ) _a = router_dtype _a = router_ignore_padding_tokens _a = relative_attention_num_buckets _a = relative_attention_max_distance _a = dropout_rate _a = layer_norm_epsilon _a = initializer_factor _a = feed_forward_proj _a = use_cache _a = add_router_probs _a = router_z_loss_coef _a = router_aux_loss_coef _a = self.feed_forward_proj.split('''-''' ) _a = act_info[-1] _a = act_info[0] == '''gated''' if len(__UpperCAmelCase ) > 1 and act_info[0] != "gated" or len(__UpperCAmelCase ) > 2: raise ValueError( F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": _a = '''gelu_new''' super().__init__( pad_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase , )
153
"""simple docstring""" from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Value from .base import TaskTemplate @dataclass(frozen=a__ ) class __lowerCamelCase ( a__ ): '''simple docstring''' # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization A_ : str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} ) A_ : ClassVar[Features] = Features({'text': Value('string' )} ) A_ : ClassVar[Features] = Features({'summary': Value('string' )} ) A_ : str = "text" A_ : str = "summary" @property def _UpperCAmelCase ( self ) -> Dict[str, str]: return {self.text_column: "text", self.summary_column: "summary"}
153
1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowerCamelCase : Union[str, Any] =logging.get_logger(__name__) lowerCamelCase : Dict ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase : Tuple ={ 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } lowerCamelCase : int ={ 'squeezebert/squeezebert-uncased': 512, 'squeezebert/squeezebert-mnli': 512, 'squeezebert/squeezebert-mnli-headless': 512, } lowerCamelCase : str ={ 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class __a ( _SCREAMING_SNAKE_CASE ): _lowerCAmelCase : List[str] = VOCAB_FILES_NAMES _lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCAmelCase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION _lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCAmelCase : Union[str, Any] = SqueezeBertTokenizer def __init__( self : Tuple , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : str="[UNK]" , SCREAMING_SNAKE_CASE : Union[str, Any]="[SEP]" , SCREAMING_SNAKE_CASE : List[Any]="[PAD]" , SCREAMING_SNAKE_CASE : str="[CLS]" , SCREAMING_SNAKE_CASE : Dict="[MASK]" , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Dict=None , **SCREAMING_SNAKE_CASE : str , ): '''simple docstring''' super().__init__( _lowerCAmelCase , tokenizer_file=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , unk_token=_lowerCAmelCase , sep_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , cls_token=_lowerCAmelCase , mask_token=_lowerCAmelCase , tokenize_chinese_chars=_lowerCAmelCase , strip_accents=_lowerCAmelCase , **_lowerCAmelCase , ) UpperCamelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , _lowerCAmelCase ) != do_lower_case or normalizer_state.get("strip_accents" , _lowerCAmelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , _lowerCAmelCase ) != tokenize_chinese_chars ): UpperCamelCase__ : Dict = getattr(_lowerCAmelCase , normalizer_state.pop("type" ) ) UpperCamelCase__ : Union[str, Any] = do_lower_case UpperCamelCase__ : Optional[int] = strip_accents UpperCamelCase__ : Optional[Any] = tokenize_chinese_chars UpperCamelCase__ : Optional[int] = normalizer_class(**_lowerCAmelCase ) UpperCamelCase__ : List[str] = do_lower_case def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' UpperCamelCase__ : 
Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : List[int] , SCREAMING_SNAKE_CASE : Optional[List[int]] = None ): '''simple docstring''' UpperCamelCase__ : Any = [self.sep_token_id] UpperCamelCase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ): '''simple docstring''' UpperCamelCase__ : Dict = self._tokenizer.model.save(_lowerCAmelCase , name=_lowerCAmelCase ) return tuple(_lowerCAmelCase )
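A minimal sketch of the special-token and segment-id layout the methods above are meant to produce for a sentence pair; the token ids are illustrative placeholders, not values from the real vocabulary:

cls, sep = [101], [102]          # hypothetical [CLS]/[SEP] ids
ids_a, ids_b = [7, 8], [9]       # hypothetical word-piece ids

input_ids = cls + ids_a + sep + ids_b + sep
token_type_ids = len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1]
print(input_ids)       # [101, 7, 8, 102, 9, 102]
print(token_type_ids)  # [0, 0, 0, 0, 1, 1]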
189
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ : List[str] = logging.get_logger(__name__) lowerCamelCase__ : List[str] = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ): '''simple docstring''' lowercase_ = "speech_to_text" lowercase_ = ["past_key_values"] lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Tuple , _lowerCAmelCase : List[Any]=10_000 , _lowerCAmelCase : List[Any]=12 , _lowerCAmelCase : Union[str, Any]=2_048 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int="relu" , _lowerCAmelCase : Union[str, Any]=256 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : str=0 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Union[str, Any]=6_000 , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Optional[Any]=(5, 5) , _lowerCAmelCase : str=1_024 , _lowerCAmelCase : str=80 , _lowerCAmelCase : Tuple=1 , **_lowerCAmelCase : Any , ): SCREAMING_SNAKE_CASE_ = vocab_size SCREAMING_SNAKE_CASE_ = d_model SCREAMING_SNAKE_CASE_ = encoder_ffn_dim SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = encoder_attention_heads SCREAMING_SNAKE_CASE_ = decoder_ffn_dim SCREAMING_SNAKE_CASE_ = decoder_layers SCREAMING_SNAKE_CASE_ = decoder_attention_heads SCREAMING_SNAKE_CASE_ = dropout SCREAMING_SNAKE_CASE_ = attention_dropout SCREAMING_SNAKE_CASE_ = activation_dropout SCREAMING_SNAKE_CASE_ = activation_function SCREAMING_SNAKE_CASE_ = init_std SCREAMING_SNAKE_CASE_ = encoder_layerdrop SCREAMING_SNAKE_CASE_ = decoder_layerdrop SCREAMING_SNAKE_CASE_ = use_cache SCREAMING_SNAKE_CASE_ = encoder_layers SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE_ = max_source_positions SCREAMING_SNAKE_CASE_ = max_target_positions SCREAMING_SNAKE_CASE_ = num_conv_layers SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase ) SCREAMING_SNAKE_CASE_ = conv_channels SCREAMING_SNAKE_CASE_ = input_feat_per_channel SCREAMING_SNAKE_CASE_ = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, " F"`config.num_conv_layers = {self.num_conv_layers}`." ) super().__init__( pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
225
0
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class __magic_name__ : '''simple docstring''' def __init__( self , _a , _a=13 , _a=30 , _a=2 , _a=3 , _a=True , _a=True , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=10 , _a=0.02 , _a=3 , _a=None , _a=2 , ): """simple docstring""" lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = image_size lowerCamelCase = patch_size lowerCamelCase = num_channels lowerCamelCase = is_training lowerCamelCase = use_labels lowerCamelCase = hidden_size lowerCamelCase = num_hidden_layers lowerCamelCase = num_attention_heads lowerCamelCase = intermediate_size lowerCamelCase = hidden_act lowerCamelCase = hidden_dropout_prob lowerCamelCase = attention_probs_dropout_prob lowerCamelCase = type_sequence_label_size lowerCamelCase = initializer_range lowerCamelCase = scope lowerCamelCase = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowerCamelCase = (image_size // patch_size) ** 2 lowerCamelCase = num_patches + 2 def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase = None if self.use_labels: lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = DeiTModel(config=_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = DeiTForMaskedImageModeling(config=_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test 
greyscale images lowerCamelCase = 1 lowerCamelCase = DeiTForMaskedImageModeling(_a ) model.to(_a ) model.eval() lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase = model(_a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _lowerCAmelCase ( self , _a , _a , _a ): """simple docstring""" lowerCamelCase = self.type_sequence_label_size lowerCamelCase = DeiTForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase = 1 lowerCamelCase = DeiTForImageClassification(_a ) model.to(_a ) model.eval() lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) = config_and_inputs lowerCamelCase = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": DeiTModel, "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = DeiTModelTester(self ) lowerCamelCase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def _lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""" ) def _lowerCAmelCase ( self ): """simple docstring""" pass def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_a , nn.Linear ) ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase = model_class(_a ) lowerCamelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase = [*signature.parameters.keys()] lowerCamelCase = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_a ) def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) def _lowerCAmelCase ( self , _a , _a , _a=False ): """simple docstring""" lowerCamelCase = super()._prepare_for_class(_a , _a , return_labels=_a ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _lowerCAmelCase ( self ): """simple docstring""" if not self.model_tester.is_training: return lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(_a ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue lowerCamelCase = model_class(_a ) model.to(_a ) model.train() lowerCamelCase = self._prepare_for_class(_a , _a , return_labels=_a ) lowerCamelCase = model(**_a ).loss loss.backward() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCamelCase = False lowerCamelCase = True for model_class in self.all_model_classes: if model_class in get_values(_a ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue lowerCamelCase = model_class(_a ) model.gradient_checkpointing_enable() model.to(_a ) model.train() lowerCamelCase = self._prepare_for_class(_a , _a , return_labels=_a ) lowerCamelCase = model(**_a ).loss loss.backward() def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase = [ {"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float}, {"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long}, {"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(_a ), *get_values(_a ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ): lowerCamelCase = problem_type["""title"""] lowerCamelCase = problem_type["""num_labels"""] lowerCamelCase = model_class(_a ) model.to(_a ) model.train() lowerCamelCase = self._prepare_for_class(_a , _a , return_labels=_a ) if problem_type["num_labels"] > 1: lowerCamelCase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] ) lowerCamelCase = inputs["""labels"""].to(problem_type["""dtype"""] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=_a ) as warning_list: lowerCamelCase = model(**_a ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def _lowerCAmelCase ( self ): """simple docstring""" for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase = DeiTModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def a__ ( ) -> int: lowerCamelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class __magic_name__ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase ( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ) if is_vision_available() else None ) @slow def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to( _a ) lowerCamelCase = self.default_image_processor lowerCamelCase = prepare_img() lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" ).to(_a ) # forward pass with torch.no_grad(): lowerCamelCase = model(**_a ) # verify the logits lowerCamelCase = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _a ) lowerCamelCase = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def _lowerCAmelCase ( self ): """simple docstring""" lowerCamelCase = DeiTModel.from_pretrained( """facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" ) lowerCamelCase = self.default_image_processor lowerCamelCase = prepare_img() lowerCamelCase = image_processor(images=_a , return_tensors="""pt""" ) lowerCamelCase = inputs.pixel_values.to(_a ) # forward pass to make sure inference works in fp16 with torch.no_grad(): lowerCamelCase = model(_a )
168
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def a__ ( snake_case__ ) -> Optional[Any]: lowerCamelCase = {} lowerCamelCase = job["""started_at"""] lowerCamelCase = job["""completed_at"""] lowerCamelCase = date_parser.parse(snake_case__ ) lowerCamelCase = date_parser.parse(snake_case__ ) lowerCamelCase = round((end_datetime - start_datetime).total_seconds() / 60.0 ) lowerCamelCase = start lowerCamelCase = end lowerCamelCase = duration_in_min return job_info def a__ ( snake_case__ , snake_case__=None ) -> Optional[Any]: lowerCamelCase = None if token is not None: lowerCamelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'Bearer {token}'} lowerCamelCase = F'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' lowerCamelCase = requests.get(snake_case__ , headers=snake_case__ ).json() lowerCamelCase = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) lowerCamelCase = math.ceil((result["""total_count"""] - 1_00) / 1_00 ) for i in range(snake_case__ ): lowerCamelCase = requests.get(url + F'&page={i + 2}' , headers=snake_case__ ).json() job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) return job_time except Exception: print(F'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} if __name__ == "__main__": lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") lowerCAmelCase : Any = parser.parse_args() lowerCAmelCase : Optional[int] = get_job_time(args.workflow_run_id) lowerCAmelCase : Dict = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"""{k}: {v['duration']}""")
168
1
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Möbius function: 0 if n has a squared prime factor, else (-1)**(number of prime factors)."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
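A short sanity sketch with standard Möbius values, assuming the `maths` helper modules imported above are importable:

for n, expected in [(1, 1), (2, -1), (6, 1), (12, 0), (30, -1)]:
    assert mobius(n) == expected, (n, mobius(n))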
31
'''simple docstring''' class lowerCamelCase_ : '''simple docstring''' def __init__( self : Tuple , A : Any , A : str , A : Union[str, Any] ): _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : Any = graph self._normalize_graph(A , A ) _UpperCAmelCase : List[str] = len(A ) _UpperCAmelCase : Tuple = None def _A ( self : Any , A : List[Any] , A : str ): if sources is int: _UpperCAmelCase : List[Any] = [sources] if sinks is int: _UpperCAmelCase : List[Any] = [sinks] if len(A ) == 0 or len(A ) == 0: return _UpperCAmelCase : str = sources[0] _UpperCAmelCase : Union[str, Any] = sinks[0] # make fake vertex if there are more # than one source or sink if len(A ) > 1 or len(A ) > 1: _UpperCAmelCase : Dict = 0 for i in sources: max_input_flow += sum(self.graph[i] ) _UpperCAmelCase : str = len(self.graph ) + 1 for room in self.graph: room.insert(0 , 0 ) self.graph.insert(0 , [0] * size ) for i in sources: _UpperCAmelCase : Optional[Any] = max_input_flow _UpperCAmelCase : List[str] = 0 _UpperCAmelCase : str = len(self.graph ) + 1 for room in self.graph: room.append(0 ) self.graph.append([0] * size ) for i in sinks: _UpperCAmelCase : Dict = max_input_flow _UpperCAmelCase : List[Any] = size - 1 def _A ( self : Union[str, Any] ): if self.maximum_flow_algorithm is None: raise Exception("You need to set maximum flow algorithm before." ) if self.source_index is None or self.sink_index is None: return 0 self.maximum_flow_algorithm.execute() return self.maximum_flow_algorithm.getMaximumFlow() def _A ( self : Tuple , A : Dict ): _UpperCAmelCase : str = algorithm(self ) class lowerCamelCase_ : '''simple docstring''' def __init__( self : Any , A : str ): _UpperCAmelCase : Optional[int] = flow_network _UpperCAmelCase : Any = flow_network.verticesCount _UpperCAmelCase : List[str] = flow_network.sourceIndex _UpperCAmelCase : Union[str, Any] = flow_network.sinkIndex # it's just a reference, so you shouldn't change # it in your algorithms, use deep copy before doing that _UpperCAmelCase : Any = flow_network.graph _UpperCAmelCase : Union[str, Any] = False def _A ( self : List[str] ): if not self.executed: self._algorithm() _UpperCAmelCase : int = True def _A ( self : List[Any] ): pass class lowerCamelCase_ (snake_case__ ): '''simple docstring''' def __init__( self : Optional[int] , A : Union[str, Any] ): super().__init__(A ) # use this to save your result _UpperCAmelCase : Any = -1 def _A ( self : Union[str, Any] ): if not self.executed: raise Exception("You should execute algorithm before using its result!" 
) return self.maximum_flow class lowerCamelCase_ (snake_case__ ): '''simple docstring''' def __init__( self : Tuple , A : int ): super().__init__(A ) _UpperCAmelCase : List[str] = [[0] * self.verticies_count for i in range(self.verticies_count )] _UpperCAmelCase : Union[str, Any] = [0] * self.verticies_count _UpperCAmelCase : int = [0] * self.verticies_count def _A ( self : Dict ): _UpperCAmelCase : Dict = self.verticies_count # push some substance to graph for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ): self.preflow[self.source_index][nextvertex_index] += bandwidth self.preflow[nextvertex_index][self.source_index] -= bandwidth self.excesses[nextvertex_index] += bandwidth # Relabel-to-front selection rule _UpperCAmelCase : Optional[int] = [ i for i in range(self.verticies_count ) if i != self.source_index and i != self.sink_index ] # move through list _UpperCAmelCase : Any = 0 while i < len(A ): _UpperCAmelCase : int = vertices_list[i] _UpperCAmelCase : int = self.heights[vertex_index] self.process_vertex(A ) if self.heights[vertex_index] > previous_height: # if it was relabeled, swap elements # and start from 0 index vertices_list.insert(0 , vertices_list.pop(A ) ) _UpperCAmelCase : Union[str, Any] = 0 else: i += 1 _UpperCAmelCase : List[Any] = sum(self.preflow[self.source_index] ) def _A ( self : Union[str, Any] , A : str ): while self.excesses[vertex_index] > 0: for neighbour_index in range(self.verticies_count ): # if it's neighbour and current vertex is higher if ( self.graph[vertex_index][neighbour_index] - self.preflow[vertex_index][neighbour_index] > 0 and self.heights[vertex_index] > self.heights[neighbour_index] ): self.push(A , A ) self.relabel(A ) def _A ( self : int , A : Dict , A : List[str] ): _UpperCAmelCase : int = min( self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , ) self.preflow[from_index][to_index] += preflow_delta self.preflow[to_index][from_index] -= preflow_delta self.excesses[from_index] -= preflow_delta self.excesses[to_index] += preflow_delta def _A ( self : Optional[int] , A : Union[str, Any] ): _UpperCAmelCase : str = None for to_index in range(self.verticies_count ): if ( self.graph[vertex_index][to_index] - self.preflow[vertex_index][to_index] > 0 ) and (min_height is None or self.heights[to_index] < min_height): _UpperCAmelCase : Tuple = self.heights[to_index] if min_height is not None: _UpperCAmelCase : Optional[Any] = min_height + 1 if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = [0] __SCREAMING_SNAKE_CASE : Union[str, Any] = [3] # graph = [ # [0, 0, 4, 6, 0, 0], # [0, 0, 5, 2, 0, 0], # [0, 0, 0, 0, 4, 4], # [0, 0, 0, 0, 6, 6], # [0, 0, 0, 0, 0, 0], # [0, 0, 0, 0, 0, 0], # ] __SCREAMING_SNAKE_CASE : List[Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] # prepare our network __SCREAMING_SNAKE_CASE : Union[str, Any] = FlowNetwork(graph, entrances, exits) # set algorithm flow_network.set_maximum_flow_algorithm(PushRelabelExecutor) # and calculate __SCREAMING_SNAKE_CASE : Optional[Any] = flow_network.find_maximum_flow() print(F'maximum flow is {maximum_flow}')
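As an independent cross-check of the push-relabel result on the same 4-node graph, a minimal BFS-based Edmonds-Karp sketch; this is a separate reference implementation, not part of the classes above:

from collections import deque


def edmonds_karp(graph, source, sink):
    """BFS-based max-flow, used only as an independent reference."""
    n = len(graph)
    flow = [[0] * n for _ in range(n)]
    max_flow = 0
    while True:
        # BFS for an augmenting path in the residual graph.
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and graph[u][v] - flow[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:  # no augmenting path left: done
            break
        # Find the bottleneck capacity along the path, then augment.
        bottleneck = float("inf")
        v = sink
        while v != source:
            u = parent[v]
            bottleneck = min(bottleneck, graph[u][v] - flow[u][v])
            v = u
        v = sink
        while v != source:
            u = parent[v]
            flow[u][v] += bottleneck
            flow[v][u] -= bottleneck
            v = u
        max_flow += bottleneck
    return max_flow


print(edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3))  # 6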
31
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple=7 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE__ : Optional[Any]=18 , SCREAMING_SNAKE_CASE__ : Optional[int]=30 , SCREAMING_SNAKE_CASE__ : Dict=4_00 , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.48145466, 0.4578275, 0.40821073] , SCREAMING_SNAKE_CASE__ : Any=[0.26862954, 0.26130258, 0.27577711] , SCREAMING_SNAKE_CASE__ : Optional[int]=True , ) -> Any: __lowerCamelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24} __lowerCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = num_channels __lowerCamelCase = image_size __lowerCamelCase = min_resolution __lowerCamelCase = max_resolution __lowerCamelCase = do_resize __lowerCamelCase = size __lowerCamelCase = do_center_crop __lowerCamelCase = crop_size __lowerCamelCase = do_normalize __lowerCamelCase = image_mean __lowerCamelCase = image_std __lowerCamelCase = do_convert_rgb def __A ( self : int ) -> Union[str, Any]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def __A ( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : List[Any]=False , SCREAMING_SNAKE_CASE__ : Tuple=False ) -> List[Any]: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: __lowerCamelCase = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) ) else: __lowerCamelCase = [] for i in range(self.batch_size ): __lowerCamelCase , __lowerCamelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 ) image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension __lowerCamelCase = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ , 0 , -1 ) ) for x in image_inputs] if torchify: __lowerCamelCase = [torch.from_numpy(SCREAMING_SNAKE_CASE__ ) for x in image_inputs] return image_inputs @require_torch @require_vision class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : int = ChineseCLIPImageProcessor if is_vision_available() else None def __A ( self : Optional[Any] ) -> str: __lowerCamelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=SCREAMING_SNAKE_CASE__ ) @property def __A ( self : str ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : 
Optional[Any] ) -> Any: __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) ) def __A ( self : Dict ) -> Any: __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 2_24, '''width''': 2_24} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __A ( self : Union[str, Any] ) -> int: pass def __A ( self : Tuple ) -> Optional[int]: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __A ( self : List[str] ) -> int: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , numpify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __A ( self : Optional[Any] ) -> Optional[Any]: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors 
__lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ , torchify=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) @require_torch @require_vision class lowerCAmelCase__ ( __lowercase , unittest.TestCase ): a__ : Optional[Any] = ChineseCLIPImageProcessor if is_vision_available() else None def __A ( self : Tuple ) -> Union[str, Any]: __lowerCamelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = 3 @property def __A ( self : Dict ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __A ( self : Any ) -> Tuple: __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_resize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''size''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''center_crop''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_normalize''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_mean''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''image_std''' ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ , '''do_convert_rgb''' ) ) def __A ( self : Any ) -> List[str]: pass def __A ( self : Optional[Any] ) -> Union[str, Any]: # Initialize image_processing __lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCamelCase = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE__ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE__ , Image.Image ) # Test not batched input __lowerCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __lowerCamelCase = image_processing(SCREAMING_SNAKE_CASE__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
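The second test class above checks that 4-channel inputs come out with 3 channels after RGB conversion; a minimal PIL-only sketch of that conversion (the array shape here is illustrative):

import numpy as np
from PIL import Image

rgba = Image.fromarray(np.zeros((18, 18, 4), dtype=np.uint8))  # inferred as RGBA
rgb = rgba.convert("RGB")
print(rgb.mode, np.asarray(rgb).shape)  # RGB (18, 18, 3)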
339
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } SCREAMING_SNAKE_CASE__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } SCREAMING_SNAKE_CASE__ : int = {"facebook/blenderbot_small-90M": 512} def __magic_name__ ( __lowerCAmelCase : List[str] ) -> Tuple: __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char __lowerCamelCase = set(__lowerCAmelCase ) return pairs class lowerCAmelCase__ ( __lowercase ): a__ : List[Any] = VOCAB_FILES_NAMES a__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP a__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ : Dict = ["""input_ids""", """attention_mask"""] def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple="__start__" , SCREAMING_SNAKE_CASE__ : Tuple="__end__" , SCREAMING_SNAKE_CASE__ : List[str]="__unk__" , SCREAMING_SNAKE_CASE__ : str="__null__" , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Optional[Any]: super().__init__(unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as vocab_handle: __lowerCamelCase = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} with open(SCREAMING_SNAKE_CASE__ , encoding='''utf-8''' ) as merges_handle: __lowerCamelCase = merges_handle.read().split('''\n''' )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in merges] __lowerCamelCase = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) ) __lowerCamelCase = {} @property def __A ( self : Dict ) -> int: return len(self.encoder ) def __A ( self : str ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __A ( self : Any , SCREAMING_SNAKE_CASE__ : str ) -> str: if token in self.cache: return self.cache[token] __lowerCamelCase = re.sub('''([.,!?()])''' , R''' \1''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub('''(\')''' , R''' \1 ''' , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = re.sub(R'''\s{2,}''' , ''' ''' , SCREAMING_SNAKE_CASE__ ) if "\n" in token: __lowerCamelCase = token.replace('''\n''' , ''' __newln__''' ) __lowerCamelCase = token.split(''' ''' ) __lowerCamelCase = [] for token in tokens: if not len(SCREAMING_SNAKE_CASE__ ): continue __lowerCamelCase = token.lower() __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) if not pairs: words.append(SCREAMING_SNAKE_CASE__ ) continue while True: __lowerCamelCase = min(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE__ , 
float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(SCREAMING_SNAKE_CASE__ ): try: __lowerCamelCase = word.index(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) new_word.extend(word[i:j] ) __lowerCamelCase = j except ValueError: new_word.extend(word[i:] ) break if word[i] == first and i < len(SCREAMING_SNAKE_CASE__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = new_word if len(SCREAMING_SNAKE_CASE__ ) == 1: break else: __lowerCamelCase = get_pairs(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = '''@@ '''.join(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase = word[:-4] __lowerCamelCase = word words.append(SCREAMING_SNAKE_CASE__ ) return " ".join(SCREAMING_SNAKE_CASE__ ) def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> List[str]: __lowerCamelCase = [] __lowerCamelCase = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE__ ) for token in words: split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE__ ).split(''' ''' ) ) ) return split_tokens def __A ( self : str , SCREAMING_SNAKE_CASE__ : str ) -> int: __lowerCamelCase = token.lower() return self.encoder.get(SCREAMING_SNAKE_CASE__ , self.encoder.get(self.unk_token ) ) def __A ( self : Dict , SCREAMING_SNAKE_CASE__ : int ) -> str: return self.decoder.get(SCREAMING_SNAKE_CASE__ , self.unk_token ) def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> str: __lowerCamelCase = ''' '''.join(SCREAMING_SNAKE_CASE__ ).replace('''@@ ''' , '''''' ).strip() return out_string def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCamelCase = os.path.join( SCREAMING_SNAKE_CASE__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__ , ensure_ascii=SCREAMING_SNAKE_CASE__ ) + '''\n''' ) __lowerCamelCase = 0 with open(SCREAMING_SNAKE_CASE__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __lowerCamelCase = token_index writer.write(''' '''.join(SCREAMING_SNAKE_CASE__ ) + '''\n''' ) index += 1 return vocab_file, merge_file
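The merge loop in the tokenizer above is standard greedy byte-pair encoding; a minimal standalone sketch of the same loop, with illustrative symbols and merge ranks (not tied to the class internals):

def bpe_merge(word, bpe_ranks):
    """Greedily apply the lowest-ranked (earliest-learned) merge until none applies."""
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)  # merge the pair into one symbol
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return word


print(bpe_merge("low", {("l", "o"): 0, ("lo", "w"): 1}))  # ('low',)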
339
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : List[str] = logging.get_logger(__name__) lowerCAmelCase_ : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __SCREAMING_SNAKE_CASE (_UpperCAmelCase ): """simple docstring""" __a ='sew-d' def __init__( self : str , __a : Union[str, Any]=32 , __a : Optional[int]=7_68 , __a : str=12 , __a : List[Any]=12 , __a : Dict=30_72 , __a : Union[str, Any]=2 , __a : List[str]=5_12 , __a : Optional[Any]=2_56 , __a : int=True , __a : int=True , __a : Optional[int]=("p2c", "c2p") , __a : Optional[Any]="layer_norm" , __a : int="gelu_python" , __a : Union[str, Any]=0.1 , __a : Any=0.1 , __a : Tuple=0.1 , __a : Optional[Any]=0.0 , __a : Any=0.1 , __a : List[Any]=0.02 , __a : Optional[Any]=1e-7 , __a : Union[str, Any]=1e-5 , __a : Tuple="group" , __a : Optional[int]="gelu" , __a : str=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , __a : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a : Optional[int]=False , __a : List[str]=1_28 , __a : Optional[int]=16 , __a : List[Any]=True , __a : int=0.05 , __a : List[str]=10 , __a : Dict=2 , __a : str=0.0 , __a : List[Any]=10 , __a : Any=0 , __a : int="mean" , __a : str=False , __a : Optional[Any]=False , __a : List[str]=2_56 , __a : List[Any]=0 , __a : int=1 , __a : Union[str, Any]=2 , **__a : Any , ): super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ ) _a = hidden_size _a = feat_extract_norm _a = feat_extract_activation _a = list(A_ ) _a = list(A_ ) _a = list(A_ ) _a = conv_bias _a = num_conv_pos_embeddings _a = num_conv_pos_embedding_groups _a = len(self.conv_dim ) _a = num_hidden_layers _a = intermediate_size _a = squeeze_factor _a = max_position_embeddings _a = position_buckets _a = share_att_key _a = relative_attention _a = norm_rel_ebd _a = list(A_ ) _a = hidden_act _a = num_attention_heads _a = hidden_dropout _a = attention_dropout _a = activation_dropout _a = feat_proj_dropout _a = final_dropout _a = layer_norm_eps _a = feature_layer_norm_eps _a = initializer_range _a = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' f'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _a = apply_spec_augment _a = mask_time_prob _a = mask_time_length _a = mask_time_min_masks _a = mask_feature_prob _a = mask_feature_length _a = mask_feature_min_masks # ctc loss _a = ctc_loss_reduction _a = ctc_zero_infinity # sequence classification _a = use_weighted_layer_sum _a = classifier_proj_size @property def UpperCamelCase__ ( self : str ): return functools.reduce(operator.mul , self.conv_stride , 1 )
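The final property above multiplies the convolutional strides to get the overall downsampling factor of the feature extractor; a standalone check with the default strides from `__init__`:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # the default above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 = 5 * 2**6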
63
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a 0b-prefixed binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be non-negative")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
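A quick check of the helper above:

print(binary_or(25, 32))  # '0b111001'  (0b011001 | 0b100000 = 0b111001)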
275
0
"""simple docstring""" import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( lowerCAmelCase , unittest.TestCase ): """simple docstring""" _lowerCAmelCase : List[str] = CanineTokenizer _lowerCAmelCase : Optional[Any] = False def snake_case ( self ): """simple docstring""" super().setUp() snake_case = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def snake_case ( self ): """simple docstring""" return CanineTokenizer.from_pretrained('google/canine-s' ) def snake_case ( self , **lowerCAmelCase ): """simple docstring""" snake_case = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase ) snake_case = 10_24 return tokenizer @require_torch def snake_case ( self ): """simple docstring""" snake_case = self.canine_tokenizer snake_case = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.'] # fmt: off snake_case = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0] # fmt: on snake_case = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase ) snake_case = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) self.assertEqual((2, 39) , batch.input_ids.shape ) self.assertEqual((2, 39) , batch.attention_mask.shape ) @require_torch def snake_case ( self ): """simple docstring""" snake_case = self.canine_tokenizer snake_case = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.'] snake_case = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors='pt' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('input_ids' , lowerCAmelCase ) self.assertIn('attention_mask' , lowerCAmelCase ) self.assertIn('token_type_ids' , lowerCAmelCase ) @require_torch def snake_case ( self ): """simple docstring""" snake_case = self.canine_tokenizer snake_case = [ 'What\'s the weater?', 'It\'s about 25 degrees.', ] snake_case = tokenizer( text_target=lowerCAmelCase , max_length=32 , padding='max_length' , truncation=lowerCAmelCase , return_tensors='pt' ) self.assertEqual(32 , targets['input_ids'].shape[1] ) def snake_case ( self ): """simple docstring""" snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc snake_case = tempfile.mkdtemp() snake_case = ' He is very happy, UNwant\u00E9d,running' snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) tokenizer.save_pretrained(lowerCAmelCase ) snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase ) snake_case = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) shutil.rmtree(lowerCAmelCase ) snake_case = 
self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc snake_case = tempfile.mkdtemp() snake_case = ' He is very happy, UNwant\u00E9d,running' snake_case = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: snake_case = chr(0xE_0_0_7 ) additional_special_tokens.append(lowerCAmelCase ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) tokenizer.save_pretrained(lowerCAmelCase ) snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase ) snake_case = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) self.assertIn(lowerCAmelCase , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) snake_case = tokenizer.__class__.from_pretrained(lowerCAmelCase , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case ,snake_case = self.get_clean_sequence(lowerCAmelCase ) # a special token for Canine can be defined as follows: snake_case = 0xE_0_0_5 snake_case = chr(lowerCAmelCase ) tokenizer.add_special_tokens({'cls_token': special_token} ) snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertEqual(len(lowerCAmelCase ) , 1 ) snake_case = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase ) snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertEqual(lowerCAmelCase , input_encoded + special_token_id ) snake_case = tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase ) self.assertTrue(special_token not in decoded ) def snake_case ( self ): """simple docstring""" snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case = chr(0xE_0_0_5 ) snake_case = chr(0xE_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. 
(in tokenization_utils_base.py) tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} ) snake_case = tokenizer.tokenize(lowerCAmelCase ) snake_case = tokenizer.tokenize(lowerCAmelCase ) self.assertEqual(len(lowerCAmelCase ) , 1 ) self.assertEqual(len(lowerCAmelCase ) , 1 ) self.assertEqual(token_a[0] , lowerCAmelCase ) self.assertEqual(token_a[0] , lowerCAmelCase ) @require_tokenizers def snake_case ( self ): """simple docstring""" snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # a special token for Canine can be defined as follows: snake_case = 0xE_0_0_6 snake_case = chr(lowerCAmelCase ) snake_case = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase ) tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCAmelCase ) tokenizer.from_pretrained(lowerCAmelCase ) def snake_case ( self ): """simple docstring""" snake_case = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCAmelCase ) with open(os.path.join(lowerCAmelCase , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: snake_case = json.load(lowerCAmelCase ) with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: snake_case = json.load(lowerCAmelCase ) # a special token for Canine can be defined as follows: snake_case = 0xE_0_0_6 snake_case = chr(lowerCAmelCase ) snake_case = [new_token_a] snake_case = [new_token_a] with open(os.path.join(lowerCAmelCase , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowerCAmelCase , lowerCAmelCase ) with open(os.path.join(lowerCAmelCase , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowerCAmelCase , lowerCAmelCase ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files snake_case = tokenizer_class.from_pretrained(lowerCAmelCase , extra_ids=0 ) self.assertIn(lowerCAmelCase , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) snake_case = 0xE_0_0_7 snake_case = chr(lowerCAmelCase ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained snake_case = [AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase )] snake_case = tokenizer_class.from_pretrained( lowerCAmelCase , additional_special_tokens=lowerCAmelCase , extra_ids=0 ) self.assertIn(lowerCAmelCase , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def snake_case ( self ): """simple docstring""" snake_case = self.get_tokenizers(do_lower_case=lowerCAmelCase ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case = 'hello world' if self.space_between_special_tokens: snake_case = '[CLS] hello world [SEP]' else: snake_case = input snake_case = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) snake_case = tokenizer.decode(lowerCAmelCase , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowerCAmelCase , [output, output.lower()] ) def snake_case ( self ): """simple docstring""" snake_case = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): snake_case = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] snake_case = 'a' snake_case = ord(lowerCAmelCase ) for attr in attributes_list: setattr(lowerCAmelCase , attr + '_id' , lowerCAmelCase ) self.assertEqual(getattr(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(getattr(lowerCAmelCase , attr + '_id' ) , lowerCAmelCase ) setattr(lowerCAmelCase , attr + '_id' , lowerCAmelCase ) self.assertEqual(getattr(lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(getattr(lowerCAmelCase , attr + '_id' ) , lowerCAmelCase ) setattr(lowerCAmelCase , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(lowerCAmelCase , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(lowerCAmelCase , 'additional_special_tokens_ids' ) , [] ) snake_case = 0xE_0_0_6 snake_case = chr(lowerCAmelCase ) setattr(lowerCAmelCase , 'additional_special_tokens_ids' , [additional_special_token_id] ) self.assertListEqual(getattr(lowerCAmelCase , 'additional_special_tokens' ) , [additional_special_token] ) self.assertListEqual(getattr(lowerCAmelCase , 'additional_special_tokens_ids' ) , [additional_special_token_id] ) def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): 
"""simple docstring""" pass def snake_case ( self ): """simple docstring""" pass
149
"""simple docstring""" import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int]=False ) -> Any: """simple docstring""" try: snake_case = os.environ[key] except KeyError: # KEY isn't set, default to `default`. snake_case = default else: # KEY is set, convert it to True or False. try: snake_case = strtobool(_UpperCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value SCREAMING_SNAKE_CASE__ = parse_flag_from_env("RUN_SLOW", default=False) def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> str: """simple docstring""" return unittest.skip('Test was skipped' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> List[str]: """simple docstring""" return unittest.skipUnless(_run_slow_tests , 'test is slow' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> int: """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> Tuple: """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> Optional[int]: """simple docstring""" return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> Any: """simple docstring""" return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Tuple ) -> str: """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> Dict: """simple docstring""" return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> List[Any]: """simple docstring""" return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> int: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : str ) -> List[Any]: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] ) -> List[Any]: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : str ) -> List[str]: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test 
requires multiple XPUs' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : str ) -> str: """simple docstring""" return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Dict ) -> List[str]: """simple docstring""" return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : int ) -> List[Any]: """simple docstring""" return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Dict=None , _UpperCamelCase : Dict=None ) -> int: """simple docstring""" if test_case is None: return partial(_UpperCamelCase , version=_UpperCamelCase ) return unittest.skipUnless(is_torch_version('>=' , _UpperCamelCase ) , f"""test requires torch version >= {version}""" )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] ) -> List[str]: """simple docstring""" return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : int ) -> int: """simple docstring""" return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(_UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Any: """simple docstring""" return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(_UpperCamelCase ) SCREAMING_SNAKE_CASE__ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowerCAmelCase__ ( _UpperCamelCase : str ) -> List[str]: """simple docstring""" return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(_UpperCamelCase ) class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" _lowerCAmelCase : Union[str, Any] = True @classmethod def snake_case ( cls ): """simple docstring""" snake_case = tempfile.mkdtemp() @classmethod def snake_case ( cls ): """simple docstring""" if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def snake_case ( self ): """simple docstring""" if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(lowerCAmelCase ) class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def snake_case ( self ): """simple docstring""" super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = mocks if isinstance(lowerCAmelCase , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Any: """simple docstring""" snake_case = AcceleratorState() snake_case = tensor[None].clone().to(state.device ) snake_case = gather(_UpperCamelCase ).cpu() snake_case = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCamelCase ): return False return True class lowerCAmelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" snake_case = returncode snake_case = stdout snake_case = stderr async def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Any ) -> List[Any]: """simple docstring""" while True: snake_case = await stream.readline() if line: callback(_UpperCamelCase ) else: break async def lowerCAmelCase__ ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : List[str]=False , _UpperCamelCase : Optional[int]=False ) -> _RunOutput: """simple docstring""" if echo: print('\nRunning: ' , ' '.join(_UpperCamelCase ) ) snake_case = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) snake_case = [] snake_case = [] def tee(_UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str]="" ): snake_case = line.decode('utf-8' ).rstrip() sink.append(_UpperCamelCase ) if not quiet: print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label='stderr:' ) ) ), ] , timeout=_UpperCamelCase , ) return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase ) def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : str=None , _UpperCamelCase : List[str]=None , _UpperCamelCase : Tuple=1_8_0 , _UpperCamelCase : Dict=False , _UpperCamelCase : Optional[Any]=True ) -> _RunOutput: """simple docstring""" snake_case = asyncio.get_event_loop() snake_case = loop.run_until_complete( _stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) ) snake_case = ' '.join(_UpperCamelCase ) if result.returncode > 0: snake_case = '\n'.join(result.stderr ) raise RuntimeError( f"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) return result class lowerCAmelCase_ ( lowerCAmelCase ): """simple docstring""" pass def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any]=False ) -> Optional[Any]: """simple docstring""" try: snake_case = subprocess.check_output(_UpperCamelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCamelCase , 'decode' ): snake_case = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f"""Command `{" ".join(_UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
149
1
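Editor's note, not part of the record above: the sample in that record exercises a save/reload round trip for CanineTokenizer. A minimal standalone sketch of the same pattern, assuming `transformers` is installed and the `google/canine-s` checkpoint (named in the sample itself) is reachable:

import tempfile

from transformers import CanineTokenizer

# Encode once, save to disk, reload, and check the reloaded tokenizer agrees.
tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
ids = tokenizer.encode("Life is like a box of chocolates.", add_special_tokens=True)
with tempfile.TemporaryDirectory() as tmp_dir:
    tokenizer.save_pretrained(tmp_dir)
    reloaded = CanineTokenizer.from_pretrained(tmp_dir)
assert reloaded.encode("Life is like a box of chocolates.", add_special_tokens=True) == ids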
import math
import os
import sys


def lowercase( UpperCamelCase_ ) -> str:
    '''simple docstring'''
    UpperCamelCase = """"""
    try:
        with open(__lowerCAmelCase , """rb""" ) as binary_file:
            UpperCamelCase = binary_file.read()
        for dat in data:
            UpperCamelCase = f"""{dat:08b}"""
            result += curr_byte
        return result
    except OSError:
        print("""File not accessible""" )
        sys.exit()


def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None:
    '''simple docstring'''
    lexicon.pop(__lowerCAmelCase )
    UpperCamelCase = last_match_id
    if math.loga(__lowerCAmelCase ).is_integer():
        for curr_key in lexicon:
            UpperCamelCase = """0""" + lexicon[curr_key]
    UpperCamelCase = bin(__lowerCAmelCase )[2:]


def lowercase( UpperCamelCase_ ) -> str:
    '''simple docstring'''
    UpperCamelCase = {"""0""": """0""", """1""": """1"""}
    UpperCamelCase , UpperCamelCase = """""", """"""
    UpperCamelCase = len(__lowerCAmelCase )
    for i in range(len(__lowerCAmelCase ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        UpperCamelCase = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        index += 1
        UpperCamelCase = """"""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        UpperCamelCase = lexicon[curr_string]
        result += last_match_id
    return result


def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> str:
    '''simple docstring'''
    UpperCamelCase = os.path.getsize(__lowerCAmelCase )
    UpperCamelCase = bin(__lowerCAmelCase )[2:]
    UpperCamelCase = len(__lowerCAmelCase )
    return "0" * (length_length - 1) + file_length_binary + compressed


def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> None:
    '''simple docstring'''
    UpperCamelCase = 8
    try:
        with open(__lowerCAmelCase , """wb""" ) as opened_file:
            UpperCamelCase = [
                to_write[i : i + byte_length] for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append("""10000000""" )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder="""big""" ) )
    except OSError:
        print("""File not accessible""" )
        sys.exit()


def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> None:
    '''simple docstring'''
    UpperCamelCase = read_file_binary(__lowerCAmelCase )
    UpperCamelCase = compress_data(__lowerCAmelCase )
    UpperCamelCase = add_file_length(__lowerCAmelCase , __lowerCAmelCase )
    write_file_binary(__lowerCAmelCase , __lowerCAmelCase )


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
343
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})''' def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ) -> List[Any]: '''simple docstring''' model.train() lowercase_ = model(__lowerCAmelCase ) lowercase_ = F.mse_loss(__lowerCAmelCase , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=False ) -> List[Any]: '''simple docstring''' set_seed(42 ) lowercase_ = RegressionModel() lowercase_ = deepcopy(__lowerCAmelCase ) lowercase_ = RegressionDataset(length=80 ) lowercase_ = DataLoader(__lowerCAmelCase , batch_size=16 ) model.to(accelerator.device ) if sched: lowercase_ = AdamW(params=model.parameters() , lr=1E-3 ) lowercase_ = AdamW(params=ddp_model.parameters() , lr=1E-3 ) lowercase_ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 ) lowercase_ = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.65 ) # Make a copy of `model` if sched: lowercase_ , lowercase_ , lowercase_ , lowercase_ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: lowercase_ , lowercase_ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase ) # Use a single batch lowercase_ , lowercase_ = next(iter(__lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) ) lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: # Sync grads step_model(__lowerCAmelCase , 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowercase_ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> int: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase ) # Use a single batch lowercase_ , lowercase_ = next(iter(__lowerCAmelCase ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) ) lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) else: # Sync grads step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowercase_ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=False , __lowerCAmelCase=False ) -> Optional[Any]: '''simple docstring''' lowercase_ = Accelerator( split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase ) for iteration, batch in enumerate(__lowerCAmelCase ): lowercase_ , lowercase_ = batch.values() # Gather the distributed inputs and targs for the base model lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) ) lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not 
param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})''' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})''' # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowercase_ = ddp_input[torch.randperm(len(__lowerCAmelCase ) )] GradientState._reset_state() def _SCREAMING_SNAKE_CASE (__lowerCAmelCase=False , __lowerCAmelCase=False ) -> Optional[int]: '''simple docstring''' lowercase_ = Accelerator( split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = get_training_setup(__lowerCAmelCase , __lowerCAmelCase ) for iteration, batch in enumerate(__lowerCAmelCase ): lowercase_ , lowercase_ = batch.values() # Gather the distributed inputs and targs for the base model lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) ) lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__lowerCAmelCase ): step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n''' lowercase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase )) if accelerator.num_processes > 1: check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def _SCREAMING_SNAKE_CASE () -> Optional[Any]: '''simple docstring''' lowercase_ = Accelerator() lowercase_ = RegressionDataset(length=80 ) lowercase_ = DataLoader(__lowerCAmelCase , batch_size=16 ) lowercase_ = RegressionDataset(length=96 ) lowercase_ = DataLoader(__lowerCAmelCase , batch_size=16 ) lowercase_ , lowercase_ = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase ) if iteration < len(__lowerCAmelCase ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__lowerCAmelCase ): assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase ) if batch_num < len(__lowerCAmelCase ) - 1: 
assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _SCREAMING_SNAKE_CASE () -> List[str]: '''simple docstring''' lowercase_ = Accelerator() lowercase_ = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(__lowerCAmelCase ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(__lowerCAmelCase ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , ) test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> str: '''simple docstring''' main() if __name__ == "__main__": main()
136
0
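Editor's note, not part of the record above: the compressor sample in that record operates on a file's contents as a bit string ("01..."). A minimal, self-contained sketch of just that bit-string step and its inverse, assuming nothing beyond the Python standard library:

def bytes_to_bits(data: bytes) -> str:
    # Each byte becomes its zero-padded 8-bit binary representation.
    return "".join(f"{byte:08b}" for byte in data)


def bits_to_bytes(bits: str) -> bytes:
    # Inverse step; assumes len(bits) is a multiple of 8, as the writer in the sample pads to.
    return bytes(int(bits[i : i + 8], 2) for i in range(0, len(bits), 8))


assert bits_to_bytes(bytes_to_bits(b"abc")) == b"abc"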
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __A : Any = logging.get_logger(__name__) class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = None @staticmethod def lowercase__ ( )->List[str]: raise NotImplementedError def lowercase__ ( self : List[str] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : str , **__UpperCamelCase : Any )->str: raise NotImplementedError def lowercase__ ( self : Any , __UpperCamelCase : List[str] )->List[str]: raise NotImplementedError def lowercase__ ( self : List[Any] )->int: if not self.is_available(): raise RuntimeError( F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.' ) @classmethod def lowercase__ ( cls : Optional[Any] )->Optional[Any]: return F'`pip install {cls.pip_package or cls.name}`' class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """optuna""" @staticmethod def lowercase__ ( )->int: return is_optuna_available() def lowercase__ ( self : Tuple , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : str , **__UpperCamelCase : int )->Optional[Any]: return run_hp_search_optuna(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : List[Any] , __UpperCamelCase : List[str] )->List[Any]: return default_hp_space_optuna(__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """ray""" UpperCamelCase__ = """'ray[tune]'""" @staticmethod def lowercase__ ( )->Optional[Any]: return is_ray_available() def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : str , **__UpperCamelCase : str )->Union[str, Any]: return run_hp_search_ray(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Dict , __UpperCamelCase : List[str] )->Optional[int]: return default_hp_space_ray(__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """sigopt""" @staticmethod def lowercase__ ( )->Optional[Any]: return is_sigopt_available() def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str , **__UpperCamelCase : List[str] )->List[Any]: return run_hp_search_sigopt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : Optional[int] , __UpperCamelCase : Any )->Any: return default_hp_space_sigopt(__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """wandb""" @staticmethod def lowercase__ ( )->int: return is_wandb_available() def lowercase__ ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : str , **__UpperCamelCase : int )->Optional[Any]: return run_hp_search_wandb(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) def lowercase__ ( self : List[str] , __UpperCamelCase : Union[str, Any] )->Tuple: return default_hp_space_wandb(__UpperCamelCase ) __A : List[Any] = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def lowercase ( ): '''simple docstring''' 
_UpperCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(_SCREAMING_SNAKE_CASE ) > 0: _UpperCAmelCase = available_backends[0].name if len(_SCREAMING_SNAKE_CASE ) > 1: logger.info( f'{len(_SCREAMING_SNAKE_CASE )} hyperparameter search backends available. Using {name} as the default.' ) return name raise RuntimeError( '''No hyperparameter search backend available.\n''' + '''\n'''.join( f' - To install {backend.name} run {backend.pip_install()}' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
368
"""simple docstring""" import json import os import unittest from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _a ( lowerCAmelCase , unittest.TestCase): """simple docstring""" UpperCamelCase__ = CTRLTokenizer UpperCamelCase__ = False UpperCamelCase__ = False def lowercase__ ( self : Dict )->str: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>'''] _UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) _UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', ''''''] _UpperCAmelCase = {'''unk_token''': '''<unk>'''} _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCamelCase ) ) def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any: kwargs.update(self.special_tokens_map ) return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt react readapt apt''' return input_text, output_text def lowercase__ ( self : Dict )->Optional[int]: _UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase = '''adapt react readapt apt''' _UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split() _UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = tokens + [tokenizer.unk_token] _UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
326
0
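Editor's note, not part of the record above: the sample in that record resolves a default hyperparameter-search backend by availability. The same first-available pattern in isolation, with a stand-in backend (the class and function names here are made up for illustration, not the library's API):

class OptunaStandIn:
    name = "optuna"

    @staticmethod
    def is_available() -> bool:
        # Availability is just "does the import succeed".
        try:
            import optuna  # noqa: F401
        except ImportError:
            return False
        return True


def default_backend(backends) -> str:
    # Return the name of the first installed backend, mirroring the sample's logic.
    available = [b for b in backends if b.is_available()]
    if not available:
        raise RuntimeError("No hyperparameter search backend available.")
    return available[0].name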
import numpy

# List of input, output pairs
lowercase__ : Tuple = (
    ((5, 2, 3), 1_5),
    ((6, 5, 9), 2_5),
    ((1_1, 1_2, 1_3), 4_1),
    ((1, 1, 1), 8),
    ((1_1, 1_2, 1_3), 4_1),
)
lowercase__ : Any = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
lowercase__ : Optional[int] = [2, 4, 1, 5]
lowercase__ : List[Any] = len(train_data)
lowercase__ : Dict = 0.0_0_9


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__="train" ) -> Tuple:
    return calculate_hypothesis_value(lowerCAmelCase_ , lowerCAmelCase_ ) - output(
        lowerCAmelCase_ , lowerCAmelCase_ )


def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Dict:
    lowerCAmelCase = 0
    for i in range(len(lowerCAmelCase_ ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> str:
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> List[str]:
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None


def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__=m ) -> List[str]:
    lowerCAmelCase = 0
    for i in range(lowerCAmelCase_ ):
        if index == -1:
            summation_value += _error(lowerCAmelCase_ )
        else:
            summation_value += _error(lowerCAmelCase_ ) * train_data[i][0][index]
    return summation_value


def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Tuple:
    lowerCAmelCase = summation_of_cost_derivative(lowerCAmelCase_ , lowerCAmelCase_ ) / m
    return cost_derivative_value


def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    lowerCAmelCase = 0.00_00_02
    lowerCAmelCase = 0
    lowerCAmelCase = 0
    while True:
        j += 1
        lowerCAmelCase = [0, 0, 0, 0]
        for i in range(0 , len(lowerCAmelCase_ ) ):
            lowerCAmelCase = get_cost_derivative(i - 1 )
            lowerCAmelCase = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            lowerCAmelCase_ , lowerCAmelCase_ , atol=lowerCAmelCase_ , rtol=lowerCAmelCase_ , ):
            break
        lowerCAmelCase = temp_parameter_vector
    print(('''Number of iterations:''', j) )


def SCREAMING_SNAKE_CASE_ ( ) -> str:
    for i in range(len(lowerCAmelCase_ ) ):
        print(('''Actual output value:''', output(lowerCAmelCase_ , '''test''' )) )
        print(('''Hypothesis output:''', calculate_hypothesis_value(lowerCAmelCase_ , '''test''' )) )


if __name__ == "__main__":
    run_gradient_descent()
    print('''\nTesting gradient descent for a linear hypothesis function.\n''')
    test_gradient_descent()
338
'''simple docstring'''

import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType


__lowerCAmelCase = None
__lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowerCAmelCase = [
    np.dtype('''|b1'''),
    np.dtype('''|u1'''),
    np.dtype('''<u2'''),
    np.dtype('''>u2'''),
    np.dtype('''<i2'''),
    np.dtype('''>i2'''),
    np.dtype('''<u4'''),
    np.dtype('''>u4'''),
    np.dtype('''<i4'''),
    np.dtype('''>i4'''),
    np.dtype('''<f4'''),
    np.dtype('''>f4'''),
    np.dtype('''<f8'''),
    np.dtype('''>f8'''),
]


@dataclass
class __magic_name__ :
    lowerCAmelCase : bool = True
    lowerCAmelCase : Optional[str] = None
    # Automatically constructed
    lowerCAmelCase : ClassVar[str] = "PIL.Image.Image"
    lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase )

    def __call__( self : Union[str, Any] ):
        return self.pa_type

    def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )
        if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
            _a : Optional[Any] = np.array(_UpperCAmelCase )
        if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
            return {"path": value, "bytes": None}
        elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
            return {"path": None, "bytes": value}
        elif isinstance(_UpperCAmelCase ,np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(_UpperCAmelCase )
        elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(_UpperCAmelCase )
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )

    def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ):
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )
        if token_per_repo_id is None:
            _a : Dict = {}
        _a , _a : str = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(_UpperCAmelCase ):
                    _a : Any = PIL.Image.open(_UpperCAmelCase )
                else:
                    _a : List[Any] = path.split('::' )[-1]
                    try:
                        _a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id']
                        _a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase )
                    except ValueError:
                        _a : int = None
                    with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f:
                        _a : Tuple = BytesIO(f.read() )
                    _a : Union[str, Any] = PIL.Image.open(bytes_ )
        else:
            _a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image

    def __lowercase ( self : int ):
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary' ),
                "path": Value('string' ),
            }
        )

    def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        if pa.types.is_string(storage.type ):
            _a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
            _a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            _a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
            _a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('bytes' ) >= 0:
                _a : Union[str, Any] = storage.field('bytes' )
            else:
                _a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
            if storage.type.get_field_index('path' ) >= 0:
                _a : Union[str, Any] = storage.field('path' )
            else:
                _a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
            _a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            _a : List[str] = pa.array(
                [encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,
                type=pa.binary() ,)
            _a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
            _a : Optional[Any] = pa.StructArray.from_arrays(
                [bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
        return array_cast(_UpperCAmelCase ,self.pa_type )

    def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(_UpperCAmelCase : Tuple ):
            with xopen(_UpperCAmelCase ,'rb' ) as f:
                _a : int = f.read()
            return bytes_

        _a : Any = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] ,
            type=pa.binary() ,)
        _a : Optional[Any] = pa.array(
            [os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,
            type=pa.string() ,)
        _a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
        return array_cast(_UpperCAmelCase ,self.pa_type )


def __lowerCamelCase ( ) -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS


def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes:
    _a : Optional[int] = BytesIO()
    if image.format in list_image_compression_formats():
        _a : Optional[Any] = image.format
    else:
        _a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
    return buffer.getvalue()


def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
    if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}


def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    _a : List[Any] = array.dtype
    _a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    _a : Union[str, Any] = dtype.kind
    _a : Union[str, Any] = dtype.itemsize
    _a : List[Any] = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        _a : Optional[int] = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        _a : Union[str, Any] = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            _a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
            _a : List[Any] = np.dtype(lowerCAmelCase_ )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
    _a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
    return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}


def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    if objs:
        _a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ )
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(lowerCAmelCase_ , np.ndarray ):
            _a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
            return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
        elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
            _a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
            return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
        else:
            return objs
    else:
        return objs
89
0
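Editor's note, not part of the record above: the gradient-descent sample in that record accumulates the cost derivative one training example at a time. A sketch of the same averaged update written with numpy vectorization, under the assumption of a linear hypothesis h(x) = theta . [1, x] (function and variable names here are illustrative, not from the sample):

import numpy as np


def gradient_descent_step(theta, x, y, lr=0.009):
    # x: (m, n) inputs, y: (m,) targets, theta: (n + 1,) parameters with bias first.
    x_aug = np.hstack([np.ones((x.shape[0], 1)), x])  # prepend bias column
    error = x_aug @ theta - y                         # hypothesis minus target
    grad = x_aug.T @ error / len(y)                   # averaged cost derivative
    return theta - lr * grad


theta = np.zeros(4)
x = np.array([[5.0, 2.0, 3.0], [6.0, 5.0, 9.0]])
y = np.array([15.0, 25.0])
theta = gradient_descent_step(theta, x, y)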
from __future__ import annotations


def __lowerCamelCase (UpperCAmelCase__ : int ):
    SCREAMING_SNAKE_CASE = str(UpperCAmelCase__ )
    return len(UpperCAmelCase__ ) == 9 and set(UpperCAmelCase__ ) == set("123456789" )


def __lowerCamelCase ():
    for base_num in range(9_9_9_9 , 4_9_9_9 , -1 ):
        SCREAMING_SNAKE_CASE = 1_0_0_0_0_2 * base_num
        if is_9_pandigital(UpperCAmelCase__ ):
            return candidate
    for base_num in range(3_3_3 , 9_9 , -1 ):
        SCREAMING_SNAKE_CASE = 1_0_0_2_0_0_3 * base_num
        if is_9_pandigital(UpperCAmelCase__ ):
            return candidate
    return None


if __name__ == "__main__":
    print(f"""{solution() = }""")
206
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class lowercase :
    lowercase__ : Dict = LEDConfig
    lowercase__ : List[str] = {}
    lowercase__ : Union[str, Any] = """gelu"""

    def __init__( self : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Dict=13 , _UpperCamelCase : Optional[int]=7 , _UpperCamelCase : int=True , _UpperCamelCase : List[Any]=False , _UpperCamelCase : Dict=99 , _UpperCamelCase : Optional[Any]=32 , _UpperCamelCase : Any=2 , _UpperCamelCase : List[str]=4 , _UpperCamelCase : Union[str, Any]=37 , _UpperCamelCase : str=0.1 , _UpperCamelCase : List[Any]=0.1 , _UpperCamelCase : Union[str, Any]=20 , _UpperCamelCase : str=2 , _UpperCamelCase : Optional[Any]=1 , _UpperCamelCase : Optional[int]=0 , _UpperCamelCase : int=4 , ) -> str:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = batch_size
        SCREAMING_SNAKE_CASE = seq_length
        SCREAMING_SNAKE_CASE = is_training
        SCREAMING_SNAKE_CASE = use_labels
        SCREAMING_SNAKE_CASE = vocab_size
        SCREAMING_SNAKE_CASE = hidden_size
        SCREAMING_SNAKE_CASE = num_hidden_layers
        SCREAMING_SNAKE_CASE = num_attention_heads
        SCREAMING_SNAKE_CASE = intermediate_size
        SCREAMING_SNAKE_CASE = hidden_dropout_prob
        SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE = max_position_embeddings
        SCREAMING_SNAKE_CASE = eos_token_id
        SCREAMING_SNAKE_CASE = pad_token_id
        SCREAMING_SNAKE_CASE = bos_token_id
        SCREAMING_SNAKE_CASE = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        SCREAMING_SNAKE_CASE = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        SCREAMING_SNAKE_CASE = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def __snake_case( self : int ) -> Tuple:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
        SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            attention_window=self.attention_window ,
            **self.config_updates ,
        )
        SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        SCREAMING_SNAKE_CASE = tf.concat(
            [tf.zeros_like(_UpperCamelCase )[:, :-1], tf.ones_like(_UpperCamelCase )[:, -1:]] ,
            axis=-1 ,
        )
        SCREAMING_SNAKE_CASE = global_attention_mask
        return config, inputs_dict

    def __snake_case( self : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> int:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = TFLEDModel(config=_UpperCamelCase ).get_decoder()
        SCREAMING_SNAKE_CASE = inputs_dict["input_ids"]
        SCREAMING_SNAKE_CASE = input_ids[:1, :]
        SCREAMING_SNAKE_CASE = inputs_dict["attention_mask"][:1, :]
        SCREAMING_SNAKE_CASE = 1
        # first forward pass
        SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase , use_cache=_UpperCamelCase )
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
        SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
        SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase )[0]
        SCREAMING_SNAKE_CASE = model(_UpperCamelCase , attention_mask=_UpperCamelCase , past_key_values=_UpperCamelCase )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
        SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(_UpperCamelCase , _UpperCamelCase , rtol=1e-3 )


def __lowerCamelCase (UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Union[str, Any]=None , ):
    if attention_mask is None:
        SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        SCREAMING_SNAKE_CASE = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        SCREAMING_SNAKE_CASE = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        SCREAMING_SNAKE_CASE = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class lowercase ( a , a , unittest.TestCase ):
    lowercase__ : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    lowercase__ : List[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    lowercase__ : int = (
        {
            """conversational""": TFLEDForConditionalGeneration,
            """feature-extraction""": TFLEDModel,
            """summarization""": TFLEDForConditionalGeneration,
            """text2text-generation""": TFLEDForConditionalGeneration,
            """translation""": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    lowercase__ : List[Any] = True
    lowercase__ : List[str] = False
    lowercase__ : List[str] = False
    lowercase__ : Union[str, Any] = False

    def __snake_case( self : Tuple ) -> Any:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = TFLEDModelTester(self )
        SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_UpperCamelCase )

    def __snake_case( self : List[Any] ) -> Optional[int]:
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def __snake_case( self : List[str] ) -> Optional[Any]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*_UpperCamelCase )

    def __snake_case( self : Dict ) -> str:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
        SCREAMING_SNAKE_CASE = tf.zeros_like(inputs_dict["attention_mask"] )
        SCREAMING_SNAKE_CASE = 2
        SCREAMING_SNAKE_CASE = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,
            1 ,
            inputs_dict["global_attention_mask"] ,
        )
        SCREAMING_SNAKE_CASE = True
        SCREAMING_SNAKE_CASE = self.model_tester.seq_length
        SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(_UpperCamelCase : Dict ):
            SCREAMING_SNAKE_CASE = outputs.decoder_attentions
            self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, seq_length, seq_length] ,
            )

        def check_encoder_attentions_output(_UpperCamelCase : Optional[Any] ):
            SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_attentions]
            SCREAMING_SNAKE_CASE = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(_UpperCamelCase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, seq_length, seq_length] ,
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,
            )

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = True
            SCREAMING_SNAKE_CASE = False
            SCREAMING_SNAKE_CASE = False
            SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
            SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
            SCREAMING_SNAKE_CASE = len(_UpperCamelCase )
            self.assertEqual(config.output_hidden_states , _UpperCamelCase )
            check_encoder_attentions_output(_UpperCamelCase )
            if self.is_encoder_decoder:
                SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
                SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
                self.assertEqual(config.output_hidden_states , _UpperCamelCase )
                check_decoder_attentions_output(_UpperCamelCase )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            SCREAMING_SNAKE_CASE = True
            SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
            SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
            self.assertEqual(config.output_hidden_states , _UpperCamelCase )
            check_encoder_attentions_output(_UpperCamelCase )
            # Check attention is always last and order is fine
            SCREAMING_SNAKE_CASE = True
            SCREAMING_SNAKE_CASE = True
            SCREAMING_SNAKE_CASE = model_class(_UpperCamelCase )
            SCREAMING_SNAKE_CASE = model(self._prepare_for_class(_UpperCamelCase , _UpperCamelCase ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCamelCase ) )
            self.assertEqual(model.config.output_hidden_states , _UpperCamelCase )
            check_encoder_attentions_output(_UpperCamelCase )

    @unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
    def __snake_case( self : Optional[Any] ) -> Tuple:
        '''simple docstring'''
        pass

    def __snake_case( self : str ) -> str:
        '''simple docstring'''
        pass


def __lowerCamelCase (UpperCAmelCase__ : Optional[int] ):
    return tf.constant(UpperCAmelCase__ , dtype=tf.intaa )


_lowerCamelCase : str = 1e-4


@slow
@require_tf
class lowercase ( unittest.TestCase ):
    def __snake_case( self : int ) -> Optional[Any]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
        # change to intended input here
        SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , _UpperCamelCase , _UpperCamelCase )
        SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )[0]
        SCREAMING_SNAKE_CASE = (1, 1_024, 768)
        self.assertEqual(output.shape , _UpperCamelCase )
        # change to expected output here
        SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
            [[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
        tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-3 )

    def __snake_case( self : Any ) -> List[str]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
        # change to intended input here
        SCREAMING_SNAKE_CASE = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        SCREAMING_SNAKE_CASE = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        SCREAMING_SNAKE_CASE = prepare_led_inputs_dict(model.config , _UpperCamelCase , _UpperCamelCase )
        SCREAMING_SNAKE_CASE = model(**_UpperCamelCase )[0]
        SCREAMING_SNAKE_CASE = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , _UpperCamelCase )
        # change to expected
output here SCREAMING_SNAKE_CASE = tf.convert_to_tensor( [[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCamelCase , atol=1e-3 , rtol=1e-3 )
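# A quick toy check (not from the source) of the attention-mask construction
# that `prepare_led_inputs_dict` above performs with `tf.math.not_equal`;
# the ids and pad_token_id=1 here are made-up illustration values.
import tensorflow as tf

input_ids = tf.constant([[0, 54, 78, 1, 1]])
attention_mask = tf.cast(tf.math.not_equal(input_ids, 1), tf.int8)
print(attention_mask.numpy())  # [[1 1 1 0 0]] -- padding positions are masked out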
206
1
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def A_ ( _UpperCAmelCase ): if isinstance(_UpperCAmelCase , collections.abc.Iterable ): return x return (x, x) @require_tf class __lowercase : """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : int): pass def _SCREAMING_SNAKE_CASE ( self : str): pass def _SCREAMING_SNAKE_CASE ( self : Dict): pass def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Tuple): SCREAMING_SNAKE_CASE_: int = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = TFVisionTextDualEncoderModel(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any=None , **lowerCAmelCase__ : List[str]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase__ , text_model=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int]=None , **lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = {"vision_model": vision_model, "text_model": text_model} SCREAMING_SNAKE_CASE_: Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) self.assertEqual(output["text_embeds"].shape , 
(input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim)) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=None , **lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase__ , text_model=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = model(input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = after_output[0].numpy() SCREAMING_SNAKE_CASE_: Union[str, Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(lowerCAmelCase__ , 1E-5) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , **lowerCAmelCase__ : List[Any]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase__ , text_model=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE_: List[str] = to_atuple(vision_model.config.image_size) SCREAMING_SNAKE_CASE_: Dict = to_atuple(vision_model.config.patch_size) SCREAMING_SNAKE_CASE_: int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE_: str = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) SCREAMING_SNAKE_CASE_: Dict = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : float): SCREAMING_SNAKE_CASE_: int = np.abs((a - b)).max() self.assertLessEqual(lowerCAmelCase__ , lowerCAmelCase__ , F"Difference between torch and flax is {diff} (>= {tol}).") def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Tuple = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Dict): 
SCREAMING_SNAKE_CASE_: List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Any = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: Any = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase__) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = self.get_pretrained_model_and_inputs() SCREAMING_SNAKE_CASE_: Dict = model_a(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = model_a(**lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = after_outputs[0].numpy() SCREAMING_SNAKE_CASE_: Optional[Any] = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(lowerCAmelCase__ , 1E-5) @require_tf class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert") SCREAMING_SNAKE_CASE_: List[str] = 13 SCREAMING_SNAKE_CASE_: int = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) SCREAMING_SNAKE_CASE_: List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) SCREAMING_SNAKE_CASE_: Union[str, Any] = random_attention_mask([batch_size, 4]) SCREAMING_SNAKE_CASE_: Dict = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: Any = TFViTModel(lowerCAmelCase__ , name="vision_model") SCREAMING_SNAKE_CASE_: Optional[Any] = TFBertModel(lowerCAmelCase__ , name="text_model") return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : Dict): SCREAMING_SNAKE_CASE_: Union[str, Any] = TFViTModelTester(self) SCREAMING_SNAKE_CASE_: Union[str, Any] = TFBertModelTester(self) SCREAMING_SNAKE_CASE_: str = vit_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_: List[Any] = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = vision_config_and_inputs ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ): List[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Tuple): # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
SCREAMING_SNAKE_CASE_: Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta") SCREAMING_SNAKE_CASE_: Optional[int] = 13 SCREAMING_SNAKE_CASE_: Optional[int] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) SCREAMING_SNAKE_CASE_: Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) SCREAMING_SNAKE_CASE_: Dict = random_attention_mask([batch_size, 4]) SCREAMING_SNAKE_CASE_: Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any]=None , **lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self.get_vision_text_model(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase__ , text_model=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = model( input_ids=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase__) , vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) SCREAMING_SNAKE_CASE_: Dict = to_atuple(vision_model.config.image_size) SCREAMING_SNAKE_CASE_: int = to_atuple(vision_model.config.patch_size) SCREAMING_SNAKE_CASE_: List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) SCREAMING_SNAKE_CASE_: Union[str, Any] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) SCREAMING_SNAKE_CASE_: List[Any] = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase__) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : str): SCREAMING_SNAKE_CASE_: Optional[int] = TFDeiTModel(lowerCAmelCase__ , name="vision_model") SCREAMING_SNAKE_CASE_: Optional[int] = TFRobertaModel(lowerCAmelCase__ , name="text_model") return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Dict = TFDeiTModelTester(self) SCREAMING_SNAKE_CASE_: List[str] = TFRobertaModelTester(self) SCREAMING_SNAKE_CASE_: Optional[Any] = vit_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_: Union[str, Any] = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = vision_config_and_inputs ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ): Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, 
"text_choice_labels": choice_labels, } @require_tf class __lowercase ( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: str = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert") SCREAMING_SNAKE_CASE_: List[str] = 13 SCREAMING_SNAKE_CASE_: Tuple = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ]) SCREAMING_SNAKE_CASE_: Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size) SCREAMING_SNAKE_CASE_: Dict = random_attention_mask([batch_size, 4]) SCREAMING_SNAKE_CASE_: Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any): SCREAMING_SNAKE_CASE_: List[str] = TFCLIPVisionModel(lowerCAmelCase__ , name="vision_model") SCREAMING_SNAKE_CASE_: List[str] = TFBertModel(lowerCAmelCase__ , name="text_model") return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: Optional[int] = TFCLIPVisionModelTester(self) SCREAMING_SNAKE_CASE_: Any = TFBertModelTester(self) SCREAMING_SNAKE_CASE_: Optional[int] = clip_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_: str = bert_model_tester.prepare_config_and_inputs() SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = vision_config_and_inputs ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ): List[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class __lowercase ( unittest.TestCase ): """simple docstring""" @slow def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Optional[int] = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") SCREAMING_SNAKE_CASE_: Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") SCREAMING_SNAKE_CASE_: int = processor( text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np") SCREAMING_SNAKE_CASE_: List[str] = model(**lowerCAmelCase__) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) SCREAMING_SNAKE_CASE_: Optional[Any] = np.array([[1.228_4727, 0.310_4122]]) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowerCAmelCase__ , atol=1E-3))
13
'''simple docstring'''

from typing import TYPE_CHECKING

from ..utils import _LazyModule


_import_structure = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
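# A hedged sketch of what the lazy structure buys: importing the package is
# cheap, and the first attribute access triggers the real submodule import
# (assumes a transformers installation where `transformers.onnx` exists).
import importlib

onnx_pkg = importlib.import_module('transformers.onnx')  # nothing heavy loaded yet
print(onnx_pkg.OnnxConfig)  # this attribute access imports .config for real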
344
0
import os


def solution() -> int:
    """Project Euler 11: greatest product of four adjacent numbers in the 20x20 grid."""
    with open(os.path.dirname(__file__) + '/grid.txt') as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
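# The same four-in-a-row product scan checked on a single toy row,
# independent of grid.txt (illustration values only).
row = [1, 2, 3, 4, 5]
best = max(row[j] * row[j + 1] * row[j + 2] * row[j + 3] for j in range(len(row) - 3))
print(best)  # 120, i.e. 2 * 3 * 4 * 5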
279
import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class __lowerCAmelCase ( nn.Module ): lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : float = 0.0 lowerCamelCase_ : int = 1 lowerCamelCase_ : int = 1 lowerCamelCase_ : bool = True lowerCamelCase_ : bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : jnp.dtype = jnp.floataa def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = [] snake_case_ : List[str] = [] for i in range(self.num_layers ): snake_case_ : Tuple = self.in_channels if i == 0 else self.out_channels snake_case_ : Dict = FlaxResnetBlockaD( in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) snake_case_ : str = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__magic_name__ ) snake_case_ : Union[str, Any] = resnets snake_case_ : Union[str, Any] = attentions if self.add_downsample: snake_case_ : List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> List[Any]: '''simple docstring''' snake_case_ : str = () for resnet, attn in zip(self.resnets , self.attentions ): snake_case_ : Optional[Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) snake_case_ : List[str] = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) output_states += (hidden_states,) if self.add_downsample: snake_case_ : Union[str, Any] = self.downsamplers_a(__magic_name__ ) output_states += (hidden_states,) return hidden_states, output_states class __lowerCAmelCase ( nn.Module ): lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : float = 0.0 lowerCamelCase_ : int = 1 lowerCamelCase_ : bool = True lowerCamelCase_ : jnp.dtype = jnp.floataa def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : Optional[Any] = [] for i in range(self.num_layers ): snake_case_ : List[Any] = self.in_channels if i == 0 else self.out_channels snake_case_ : Tuple = FlaxResnetBlockaD( in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) snake_case_ : Dict = resnets if self.add_downsample: snake_case_ : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = () for resnet in self.resnets: snake_case_ : List[Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) output_states += (hidden_states,) if self.add_downsample: snake_case_ : str = self.downsamplers_a(__magic_name__ ) output_states += (hidden_states,) return hidden_states, output_states class __lowerCAmelCase ( nn.Module ): lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : float = 0.0 lowerCamelCase_ : int = 1 lowerCamelCase_ : int = 1 lowerCamelCase_ : bool = True lowerCamelCase_ : bool = False lowerCamelCase_ : 
bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : jnp.dtype = jnp.floataa def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Optional[Any] = [] snake_case_ : Optional[Any] = [] for i in range(self.num_layers ): snake_case_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels snake_case_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels snake_case_ : Dict = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) snake_case_ : List[str] = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__magic_name__ ) snake_case_ : List[Any] = resnets snake_case_ : Tuple = attentions if self.add_upsample: snake_case_ : List[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Union[str, Any]: '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states snake_case_ : Dict = res_hidden_states_tuple[-1] snake_case_ : List[Any] = res_hidden_states_tuple[:-1] snake_case_ : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) snake_case_ : Tuple = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) snake_case_ : Tuple = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) if self.add_upsample: snake_case_ : Optional[Any] = self.upsamplers_a(__magic_name__ ) return hidden_states class __lowerCAmelCase ( nn.Module ): lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : int lowerCamelCase_ : float = 0.0 lowerCamelCase_ : int = 1 lowerCamelCase_ : bool = True lowerCamelCase_ : jnp.dtype = jnp.floataa def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Union[str, Any] = [] for i in range(self.num_layers ): snake_case_ : Tuple = self.in_channels if (i == self.num_layers - 1) else self.out_channels snake_case_ : Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels snake_case_ : int = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) snake_case_ : Tuple = resnets if self.add_upsample: snake_case_ : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> List[Any]: '''simple docstring''' for resnet in self.resnets: # pop res hidden states snake_case_ : Tuple = res_hidden_states_tuple[-1] snake_case_ : List[Any] = res_hidden_states_tuple[:-1] snake_case_ : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) snake_case_ : Optional[Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) if self.add_upsample: snake_case_ : Optional[int] = self.upsamplers_a(__magic_name__ ) return hidden_states class __lowerCAmelCase ( nn.Module ): lowerCamelCase_ : int lowerCamelCase_ : float = 0.0 lowerCamelCase_ : int = 1 lowerCamelCase_ : int = 1 lowerCamelCase_ : 
bool = False lowerCamelCase_ : bool = False lowerCamelCase_ : jnp.dtype = jnp.floataa def lowerCamelCase (self ) -> str: '''simple docstring''' snake_case_ : Dict = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] snake_case_ : int = [] for _ in range(self.num_layers ): snake_case_ : str = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__magic_name__ ) snake_case_ : Dict = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__magic_name__ ) snake_case_ : Optional[Any] = resnets snake_case_ : Optional[int] = attentions def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Optional[int]: '''simple docstring''' snake_case_ : List[Any] = self.resnets[0](__magic_name__ , __magic_name__ ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): snake_case_ : Tuple = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) snake_case_ : Union[str, Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ ) return hidden_states
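# Minimal jnp sketch (toy shapes) of the up-block skip connection above:
# the saved down-block activation is concatenated on the channel axis,
# which is why the up-block resnets take in_channels + res_skip_channels.
import jax.numpy as jnp

hidden_states = jnp.ones((1, 8, 8, 4))      # NHWC, 4 channels from the previous block
res_hidden_states = jnp.ones((1, 8, 8, 6))  # 6 channels saved on the way down
print(jnp.concatenate((hidden_states, res_hidden_states), axis=-1).shape)  # (1, 8, 8, 10)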
279
1
'''simple docstring'''

import numpy as np
from PIL import Image


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size`` x ``size`` window over ``arr`` with the given stride, keeping the max."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Same sliding window, keeping the (integer-truncated) average instead of the max."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # Loading the image
    image = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
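# For reference, both pools on a 4x4 toy array with size=2, stride=2; the
# values follow from the definitions above (avgpooling truncates via int()).
arr = np.arange(1, 17).reshape(4, 4)
print(maxpooling(arr, size=2, stride=2))
# [[ 6.  8.]
#  [14. 16.]]
print(avgpooling(arr, size=2, stride=2))
# [[ 3.  5.]
#  [11. 13.]]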
56
import argparse import os import re A_ : List[str] = 'src/diffusers' # Pattern that looks at the indentation in a line. A_ : Union[str, Any] = re.compile(r'^(\s*)\S') # Pattern that matches `"key":" and puts `key` in group 0. A_ : int = re.compile(r'^\s*"([^"]+)":') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. A_ : Optional[int] = re.compile(r'^\s*_import_structure\["([^"]+)"\]') # Pattern that matches `"key",` and puts `key` in group 0. A_ : List[Any] = re.compile(r'^\s*"([^"]+)",\s*$') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. A_ : List[str] = re.compile(r'\[([^\]]+)\]') def UpperCamelCase (lowercase_: List[str] ) -> Dict: A__ : Optional[Any] = _re_indent.search(lowercase_ ) return "" if search is None else search.groups()[0] def UpperCamelCase (lowercase_: Dict , lowercase_: Any="" , lowercase_: Any=None , lowercase_: Any=None ) -> Tuple: A__ : Optional[Any] = 0 A__ : str = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(lowercase_ ): index += 1 A__ : Tuple = ["""\n""".join(lines[:index] )] else: A__ : Optional[Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). A__ : Union[str, Any] = [lines[index]] index += 1 while index < len(lowercase_ ) and (end_prompt is None or not lines[index].startswith(lowercase_ )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(lowercase_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(lowercase_ ) ) if index < len(lowercase_ ) - 1: A__ : Union[str, Any] = [lines[index + 1]] index += 1 else: A__ : List[Any] = [] else: blocks.append("""\n""".join(lowercase_ ) ) A__ : int = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(lowercase_ ) > 0: blocks.append("""\n""".join(lowercase_ ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(lowercase_ ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def UpperCamelCase (lowercase_: str ) -> str: def _inner(lowercase_: Union[str, Any] ): return key(lowercase_ ).lower().replace("""_""" , """""" ) return _inner def UpperCamelCase (lowercase_: int , lowercase_: Any=None ) -> str: # If no key is provided, we use a noop. def noop(lowercase_: Any ): return x if key is None: A__ : Optional[Any] = noop # Constants are all uppercase, they go first. A__ : Optional[int] = [obj for obj in objects if key(lowercase_ ).isupper()] # Classes are not all uppercase but start with a capital, they go second. A__ : List[Any] = [obj for obj in objects if key(lowercase_ )[0].isupper() and not key(lowercase_ ).isupper()] # Functions begin with a lowercase, they go last. A__ : Tuple = [obj for obj in objects if not key(lowercase_ )[0].isupper()] A__ : Any = ignore_underscore(lowercase_ ) return sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) + sorted(lowercase_ , key=lowercase_ ) def UpperCamelCase (lowercase_: List[Any] ) -> List[Any]: # This inner function sort imports between [ ]. def _replace(lowercase_: List[Any] ): A__ : Tuple = match.groups()[0] if "," not in imports: return f"""[{imports}]""" A__ : Optional[int] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: A__ : Any = keys[:-1] return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(lowercase_ )] ) + "]" A__ : Dict = import_statement.split("""\n""" ) if len(lowercase_ ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. A__ : List[str] = 2 if lines[1].strip() == """[""" else 1 A__ : Any = [(i, _re_strip_line.search(lowercase_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] A__ : Any = sort_objects(lowercase_ , key=lambda lowercase_ : x[1] ) A__ : int = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(lowercase_ ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: A__ : Union[str, Any] = _re_bracket_content.sub(_replace , lines[1] ) else: A__ : Any = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: A__ : Tuple = keys[:-1] A__ : List[Any] = get_indent(lines[1] ) + """, """.join([f"""\"{k}\"""" for k in sort_objects(lowercase_ )] ) return "\n".join(lowercase_ ) else: # Finally we have to deal with imports fitting on one line A__ : int = _re_bracket_content.sub(_replace , lowercase_ ) return import_statement def UpperCamelCase (lowercase_: Optional[int] , lowercase_: str=True ) -> Any: with open(lowercase_ , """r""" ) as f: A__ : Optional[int] = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 A__ : Tuple = split_code_in_indented_blocks( lowercase_ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(lowercase_ ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. A__ : int = main_blocks[block_idx] A__ : Optional[Any] = block.split("""\n""" ) # Get to the start of the imports. A__ : Any = 0 while line_idx < len(lowercase_ ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: A__ : Optional[Any] = len(lowercase_ ) else: line_idx += 1 if line_idx >= len(lowercase_ ): continue # Ignore beginning and last line: they don't contain anything. A__ : Union[str, Any] = """\n""".join(block_lines[line_idx:-1] ) A__ : List[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. A__ : Union[str, Any] = split_code_in_indented_blocks(lowercase_ , indent_level=lowercase_ ) # We have two categories of import key: list or _import_structure[key].append/extend A__ : Optional[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. A__ : int = [(pattern.search(lowercase_ ).groups()[0] if pattern.search(lowercase_ ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. A__ : int = [(i, key) for i, key in enumerate(lowercase_ ) if key is not None] A__ : List[Any] = [x[0] for x in sorted(lowercase_ , key=lambda lowercase_ : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. 
A__ : Optional[int] = 0 A__ : Any = [] for i in range(len(lowercase_ ) ): if keys[i] is None: reordered_blocks.append(internal_blocks[i] ) else: A__ : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reordered_blocks.append(lowercase_ ) count += 1 # And we put our main block back together with its first and last line. A__ : Tuple = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] ) if code != "\n".join(lowercase_ ): if check_only: return True else: print(f"""Overwriting {file}.""" ) with open(lowercase_ , """w""" ) as f: f.write("""\n""".join(lowercase_ ) ) def UpperCamelCase (lowercase_: Any=True ) -> Any: A__ : Dict = [] for root, _, files in os.walk(lowercase_ ): if "__init__.py" in files: A__ : List[Any] = sort_imports(os.path.join(lowercase_ , """__init__.py""" ) , check_only=lowercase_ ) if result: A__ : Optional[int] = [os.path.join(lowercase_ , """__init__.py""" )] if len(lowercase_ ) > 0: raise ValueError(f"""Would overwrite {len(lowercase_ )} files, run `make style`.""" ) if __name__ == "__main__": A_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.') A_ : List[str] = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
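# The ordering rule the sorter above implements (constants, then classes,
# then functions, each alphabetised case-insensitively ignoring underscores),
# restated as a self-contained sketch; `sort_objects` is the upstream
# diffusers name for the mangled `UpperCamelCase` sorter in this dump.
def sort_objects_sketch(objects):
    key = lambda obj: obj.lower().replace('_', '')
    constants = sorted((o for o in objects if o.isupper()), key=key)
    classes = sorted((o for o in objects if o[0].isupper() and not o.isupper()), key=key)
    functions = sorted((o for o in objects if not o[0].isupper()), key=key)
    return constants + classes + functions

print(sort_objects_sketch(['logging', 'CONFIG_NAME', 'OnnxConfig', 'export']))
# ['CONFIG_NAME', 'OnnxConfig', 'export', 'logging']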
192
0
'''simple docstring'''


def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate/extrapolate f(x0) from (x, y) samples via Neville's iterated scheme."""
    n = len(x_points)
    q = [[0] * n for _ in range(n)]
    # seed column 1 with the known function values
    for i in range(n):
        q[i][1] = y_points[i]

    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
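# Sanity check: interpolating linear data y = x + 5 at x = 5; the first
# return value is the estimate, the second the full Neville table.
value, table = neville_interpolate([1, 2, 3, 4], [6, 7, 8, 9], 5)
print(value)  # 10.0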
219
'''simple docstring'''

# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='subcommands', dest='subcommand')

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, 'func'):
        config_parser.print_help()
        exit(1)

    # Run the selected subcommand
    args.func(args)


if __name__ == "__main__":
    main()
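# Hedged usage sketch: build the CLI and ask argparse for its usage text
# (`--help` prints the registered subcommands and then exits via SystemExit).
parser = get_config_parser()
parser.parse_args(['--help'])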
219
1
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: # save results if os.path.exists(UpperCamelCase__ ): if os.path.exists(os.path.join(UpperCamelCase__ , 'config.json' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , 'config.json' ) ): os.remove(os.path.join(UpperCamelCase__ , 'config.json' ) ) if os.path.exists(os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ): os.remove(os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ) else: os.makedirs(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False ) -> Tuple: snake_case_ = 2 if unlogit: snake_case_ = torch.pow(UpperCamelCase__ , UpperCamelCase__ ) snake_case_ = p * torch.log(UpperCamelCase__ ) snake_case_ = 0 return -plogp.sum(dim=-1 ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: logger.info('lv, h >\t' + '\t'.join(f'{x + 1}' for x in range(len(UpperCamelCase__ ) ) ) ) for row in range(len(UpperCamelCase__ ) ): if tensor.dtype != torch.long: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(f'layer {row + 1}:\t' + '\t'.join(f'{x:d}' for x in tensor[row].cpu().data ) ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=False ) -> Optional[Any]: snake_case_ = model.config.num_hidden_layers, model.config.num_attention_heads snake_case_ = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) snake_case_ = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) if head_mask is None: snake_case_ = torch.ones(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) head_mask.requires_grad_(requires_grad=UpperCamelCase__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: snake_case_ = None snake_case_ = 0.0 snake_case_ = 0.0 for step, inputs in enumerate(tqdm(UpperCamelCase__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): snake_case_ = tuple(t.to(args.device ) for t in inputs ) (snake_case_ ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) snake_case_ = model(UpperCamelCase__ , labels=UpperCamelCase__ , head_mask=UpperCamelCase__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) snake_case_ = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(UpperCamelCase__ ): snake_case_ = entropy(attn.detach() , UpperCamelCase__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: snake_case_ = 2 
snake_case_ = torch.pow(torch.pow(UpperCamelCase__ , UpperCamelCase__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not args.dont_normalize_global_importance: snake_case_ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(UpperCamelCase__ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(UpperCamelCase__ ) logger.info('Head ranked by importance scores' ) snake_case_ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) snake_case_ = torch.arange( head_importance.numel() , device=args.device ) snake_case_ = head_ranks.view_as(UpperCamelCase__ ) print_ad_tensor(UpperCamelCase__ ) return attn_entropy, head_importance, total_loss def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: snake_case_ = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ ) snake_case_ = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , UpperCamelCase__ , original_score * args.masking_threshold ) snake_case_ = torch.ones_like(UpperCamelCase__ ) snake_case_ = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) snake_case_ = original_score while current_score >= original_score * args.masking_threshold: snake_case_ = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads snake_case_ = float('Inf' ) snake_case_ = head_importance.view(-1 ).sort()[1] if len(UpperCamelCase__ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads snake_case_ = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) snake_case_ = new_head_mask.view(-1 ) snake_case_ = 0.0 snake_case_ = new_head_mask.view_as(UpperCamelCase__ ) snake_case_ = new_head_mask.clone().detach() print_ad_tensor(UpperCamelCase__ ) # Compute metric and head importance again snake_case_ = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ ) snake_case_ = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('Final head mask' ) print_ad_tensor(UpperCamelCase__ ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: snake_case_ = datetime.now() snake_case_ = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ ) snake_case_ = 1 / loss snake_case_ = datetime.now() - before_time snake_case_ = sum(p.numel() for p in model.parameters() ) snake_case_ = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) ) } for k, v in heads_to_prune.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): snake_case_ = [ v, ] assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(UpperCamelCase__ ) snake_case_ = sum(p.numel() for p 
in model.parameters() ) snake_case_ = datetime.now() snake_case_ = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , ) snake_case_ = 1 / loss snake_case_ = datetime.now() - before_time logger.info( 'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCamelCase__ , UpperCamelCase__ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(UpperCamelCase__ , args.output_dir ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=UpperCamelCase__ , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=UpperCamelCase__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=UpperCamelCase__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' ) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=UpperCamelCase__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=UpperCamelCase__ , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=UpperCamelCase__ , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=UpperCamelCase__ , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' 
) , ) parser.add_argument('--batch_size' , default=1 , type=UpperCamelCase__ , help='Batch size.' ) parser.add_argument('--seed' , type=UpperCamelCase__ , default=42 ) parser.add_argument('--local_rank' , type=UpperCamelCase__ , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' ) snake_case_ = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: snake_case_ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) snake_case_ = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) snake_case_ = torch.device('cuda' , args.local_rank ) snake_case_ = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) snake_case_ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: snake_case_ = nn.parallel.DistributedDataParallel( UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ ) elif args.n_gpu > 1: snake_case_ = nn.DataParallel(UpperCamelCase__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , UpperCamelCase__ ) # Prepare dataset snake_case_ = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) snake_case_ = (torch.from_numpy(UpperCamelCase__ ),) snake_case_ = TensorDataset(*UpperCamelCase__ ) snake_case_ = RandomSampler(UpperCamelCase__ ) snake_case_ = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: snake_case_ = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) prune_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
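A minimal usage sketch of the pruning entry point the script above drives; the checkpoint and head choices are illustrative assumptions, not values from the script:

# Illustrative: prune two heads from layer 0 of a small GPT-2 and compare sizes.
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("gpt2")  # assumed checkpoint
before = sum(p.numel() for p in model.parameters())
model.prune_heads({0: [0, 1]})  # {layer_index: [head_indices]}
after = sum(p.numel() for p in model.parameters())
print(f"params: {before:,} -> {after:,}")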
69
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
263
0
import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal __A = datasets.utils.logging.get_logger(__name__) __A = ['''names''', '''prefix'''] __A = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] __A = ['''encoding_errors''', '''on_bad_lines'''] __A = ['''date_format'''] @dataclass class lowercase ( datasets.BuilderConfig): """simple docstring""" a__ : str = "," a__ : Optional[str] = None a__ : Optional[Union[int, List[int], str]] = "infer" a__ : Optional[List[str]] = None a__ : Optional[List[str]] = None a__ : Optional[Union[int, str, List[int], List[str]]] = None a__ : Optional[Union[List[int], List[str]]] = None a__ : Optional[str] = None a__ : bool = True a__ : Optional[Literal["c", "python", "pyarrow"]] = None a__ : Dict[Union[int, str], Callable[[Any], Any]] = None a__ : Optional[list] = None a__ : Optional[list] = None a__ : bool = False a__ : Optional[Union[int, List[int]]] = None a__ : Optional[int] = None a__ : Optional[Union[str, List[str]]] = None a__ : bool = True a__ : bool = True a__ : bool = False a__ : bool = True a__ : Optional[str] = None a__ : str = "." a__ : Optional[str] = None a__ : str = '"' a__ : int = 0 a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None a__ : bool = True a__ : bool = True a__ : int = 0 a__ : bool = True a__ : bool = False a__ : Optional[str] = None a__ : int = 1_0000 a__ : Optional[datasets.Features] = None a__ : Optional[str] = "strict" a__ : Literal["error", "warn", "skip"] = "error" a__ : Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: if self.delimiter is not None: UpperCAmelCase_= self.delimiter if self.column_names is not None: UpperCAmelCase_= self.column_names @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: UpperCAmelCase_= { """sep""": self.sep, """header""": self.header, """names""": self.names, """index_col""": self.index_col, """usecols""": self.usecols, """prefix""": self.prefix, """mangle_dupe_cols""": self.mangle_dupe_cols, """engine""": self.engine, """converters""": self.converters, """true_values""": self.true_values, """false_values""": self.false_values, """skipinitialspace""": self.skipinitialspace, """skiprows""": self.skiprows, """nrows""": self.nrows, """na_values""": self.na_values, """keep_default_na""": self.keep_default_na, """na_filter""": self.na_filter, """verbose""": self.verbose, """skip_blank_lines""": self.skip_blank_lines, """thousands""": self.thousands, """decimal""": self.decimal, """lineterminator""": self.lineterminator, """quotechar""": self.quotechar, """quoting""": self.quoting, """escapechar""": self.escapechar, """comment""": self.comment, """encoding""": self.encoding, """dialect""": self.dialect, """error_bad_lines""": self.error_bad_lines, """warn_bad_lines""": self.warn_bad_lines, """skipfooter""": self.skipfooter, """doublequote""": self.doublequote, """memory_map""": self.memory_map, """float_precision""": self.float_precision, """chunksize""": self.chunksize, """encoding_errors""": self.encoding_errors, """on_bad_lines""": self.on_bad_lines, """date_format""": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass 
them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __UpperCAmelCase ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowercase ( datasets.ArrowBasedBuilder): """simple docstring""" a__ : int = CsvConfig def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : Dict ) -> Optional[int]: if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) UpperCAmelCase_= dl_manager.download_and_extract(self.config.data_files ) if isinstance(__UpperCAmelCase , (str, list, tuple) ): UpperCAmelCase_= data_files if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase_= [files] UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] UpperCAmelCase_= [] for split_name, files in data_files.items(): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): UpperCAmelCase_= [files] UpperCAmelCase_= [dl_manager.iter_files(__UpperCAmelCase ) for file in files] splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"""files""": files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Tuple , __UpperCAmelCase : pa.Table ) -> pa.Table: if self.config.features is not None: UpperCAmelCase_= self.config.features.arrow_schema if all(not require_storage_cast(__UpperCAmelCase ) for feature in self.config.features.values() ): # cheaper cast UpperCAmelCase_= pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__UpperCAmelCase ) else: # more expensive cast; allows str <-> int/float or str to Audio for example UpperCAmelCase_= table_cast(__UpperCAmelCase , __UpperCAmelCase ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Any , __UpperCAmelCase : List[Any] ) -> List[str]: UpperCAmelCase_= self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str UpperCAmelCase_= ( { name: dtype.to_pandas_dtype() if not require_storage_cast(__UpperCAmelCase ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase ) ): UpperCAmelCase_= pd.read_csv(__UpperCAmelCase , iterator=__UpperCAmelCase , dtype=__UpperCAmelCase , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(__UpperCAmelCase ): UpperCAmelCase_= pa.Table.from_pandas(__UpperCAmelCase ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), 
self._cast_table(__UpperCAmelCase ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(__UpperCAmelCase )}: {e}""" ) raise
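A short sketch of how the CSV builder above is normally reached; the file name is an assumption for illustration:

from datasets import load_dataset

# "csv" routes through the builder above; extra keyword arguments land on CsvConfig.
dataset = load_dataset("csv", data_files={"train": "train.csv"}, delimiter=",")
print(dataset["train"].features)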
277
import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def __a ( ) -> str: '''simple docstring''' UpperCAmelCase_= torch.nn.Linear(2 ,4 ) UpperCAmelCase_= torch.optim.AdamW(model.parameters() ,lr=1.0 ) UpperCAmelCase_= torch.optim.lr_scheduler.OneCycleLR(lowerCAmelCase_ ,max_lr=0.01 ,steps_per_epoch=2 ,epochs=1 ) UpperCAmelCase_= DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) UpperCAmelCase_= DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def __a ( lowerCAmelCase_ : Any ) -> Union[str, Any]: '''simple docstring''' return (model.weight.abs().sum() + model.bias.abs().sum()).item() def __a ( lowerCAmelCase_ : Tuple ) -> Tuple: '''simple docstring''' UpperCAmelCase_= torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(lowerCAmelCase_ ) class lowercase ( snake_case__): """simple docstring""" @require_cuda def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_= Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(__UpperCAmelCase ): UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: UpperCAmelCase_= Accelerator() UpperCAmelCase_= GradientState() assert state.num_steps == 1 UpperCAmelCase_= 4 assert state.num_steps == 4 assert state.sync_gradients is True UpperCAmelCase_= False assert state.sync_gradients is False GradientState._reset_state() def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: UpperCAmelCase_= Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components() ( ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), ( UpperCAmelCase_ ), )= accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: UpperCAmelCase_= Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components() accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*__UpperCAmelCase : Dict , **__UpperCAmelCase : Tuple ): pass with patch("""torch.cuda.set_device""" , 
__UpperCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ): UpperCAmelCase_= Accelerator() self.assertEqual(str(accelerator.state.device ) , """cuda:64""" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_= Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components() accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase_= get_signature(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__UpperCAmelCase ) # make sure random weights don't match load_random_weights(__UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(__UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_= Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components() accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase_= get_signature(__UpperCAmelCase ) # saving hook def save_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ): UpperCAmelCase_= {"""class_name""": models[0].__class__.__name__} with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """w""" ) as f: json.dump(__UpperCAmelCase , __UpperCAmelCase ) # loading hook def load_config(__UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] ): with open(os.path.join(__UpperCAmelCase , """data.json""" ) , """r""" ) as f: UpperCAmelCase_= json.load(__UpperCAmelCase ) UpperCAmelCase_= config["""class_name"""] UpperCAmelCase_= accelerator.register_save_state_pre_hook(__UpperCAmelCase ) UpperCAmelCase_= accelerator.register_load_state_pre_hook(__UpperCAmelCase ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__UpperCAmelCase ) # make sure random weights don't match with hooks load_random_weights(__UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 ) # random class name to verify correct one is loaded UpperCAmelCase_= """random""" # make sure loaded weights match with hooks accelerator.load_state(__UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(__UpperCAmelCase ) # make sure random weights don't match with hooks removed load_random_weights(__UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1E-3 ) # random class name to verify correct one is loaded UpperCAmelCase_= """random""" # make sure loaded weights match with hooks removed accelerator.load_state(__UpperCAmelCase ) self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: UpperCAmelCase_= Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components() 
UpperCAmelCase_= None # This should work UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(dummy_obj is None ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_= Accelerator() UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= create_components() UpperCAmelCase_= [1, 2, 3] # This should work UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= accelerator.prepare( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertEqual( getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Dummy object should have `_is_accelerate_prepared` set to `True`""" , ) self.assertEqual( getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Model is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Optimizer is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Scheduler is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , ) self.assertEqual( getattr(__UpperCAmelCase , """_is_accelerate_prepared""" , __UpperCAmelCase ) , __UpperCAmelCase , """Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`""" , ) @slow @require_bnb def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: from transformers import AutoModelForCausalLM UpperCAmelCase_= AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map={"""""": 0} , ) UpperCAmelCase_= Accelerator() # This should work UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase ) @slow @require_bnb def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: from transformers import AutoModelForCausalLM UpperCAmelCase_= Accelerator() with init_empty_weights(): UpperCAmelCase_= AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) model.tie_weights() UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase ) UpperCAmelCase_= """cpu""" UpperCAmelCase_= AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase ) # This should not work and get value error with self.assertRaises(__UpperCAmelCase ): UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase ) @slow @require_bnb @require_multi_gpu def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: from transformers import AutoModelForCausalLM UpperCAmelCase_= {"""distributed_type""": DistributedType.MULTI_GPU} with init_empty_weights(): UpperCAmelCase_= AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) model.tie_weights() UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase ) UpperCAmelCase_= 1 UpperCAmelCase_= AutoModelForCausalLM.from_pretrained( 
"""EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , ) UpperCAmelCase_= Accelerator() # This should not work and get value error with self.assertRaises(__UpperCAmelCase ): UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: from transformers import AutoModelForCausalLM with init_empty_weights(): UpperCAmelCase_= AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , ) UpperCAmelCase_= infer_auto_device_map(__UpperCAmelCase ) UpperCAmelCase_= 1 UpperCAmelCase_= AutoModelForCausalLM.from_pretrained( """EleutherAI/gpt-neo-125m""" , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , ) UpperCAmelCase_= Accelerator() # This should work UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase ) @require_cuda def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_= torch.nn.Linear(10 , 10 ) UpperCAmelCase_= torch.optim.SGD(model.parameters() , lr=0.01 ) UpperCAmelCase_= Accelerator(cpu=__UpperCAmelCase ) UpperCAmelCase_= accelerator.prepare(__UpperCAmelCase )
277
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
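A quick sketch of the config above in use; the printed values are its own defaults:

config = RealmConfig()
print(config.retriever_proj_size, config.reader_beam_size)  # 128 5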
305
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)

prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return one product of primes per way to write ``number_to_partition``
    as a sum of primes below ``NUM_PRIMES``."""
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer with more than ``number_unique_partitions``
    prime partitions (Project Euler 77)."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
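A quick check of the helpers above, using the cleaned-up names; 10 is the smallest number with more than four prime partitions (7+3, 5+5, 5+3+2, 3+3+2+2, 2+2+2+2+2):

assert len(partition(10)) == 5  # each partition is encoded as a product of its primes
assert solution(4) == 10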
305
1
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available from .timesteps import ( fastaa_timesteps, smartaa_timesteps, smartaa_timesteps, smartaaa_timesteps, smartaaa_timesteps, superaa_timesteps, superaa_timesteps, superaaa_timesteps, ) @dataclass class lowerCAmelCase__ ( A_ ): __a = 42 __a = 42 __a = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_if import IFPipeline from .pipeline_if_imgaimg import IFImgaImgPipeline from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline from .pipeline_if_inpainting import IFInpaintingPipeline from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline from .pipeline_if_superresolution import IFSuperResolutionPipeline from .safety_checker import IFSafetyChecker from .watermark import IFWatermarker
40
"""simple docstring""" def _UpperCAmelCase ( __lowerCamelCase : int ) -> bool: return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
40
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCamelCase ( unittest.TestCase ): def __init__(self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , ) -> Union[str, Any]: UpperCamelCase = size if size is not None else {"height": 18, "width": 18} UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = num_channels UpperCamelCase = image_size UpperCamelCase = min_resolution UpperCamelCase = max_resolution UpperCamelCase = do_resize UpperCamelCase = size UpperCamelCase = apply_ocr def snake_case_ (self ) -> Any: return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCamelCase ( _lowercase , unittest.TestCase ): UpperCAmelCase_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None def snake_case_ (self ) -> Dict: UpperCamelCase = LayoutLMvaImageProcessingTester(self ) @property def snake_case_ (self ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__a , "do_resize" ) ) self.assertTrue(hasattr(__a , "size" ) ) self.assertTrue(hasattr(__a , "apply_ocr" ) ) def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def snake_case_ (self ) -> Optional[Any]: pass def snake_case_ (self ) -> Dict: # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a ) for image in image_inputs: self.assertIsInstance(__a , Image.Image ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , __a ) self.assertIsInstance(encoding.boxes , __a ) # Test batched UpperCamelCase = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def snake_case_ (self ) -> Tuple: # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a ) for image in image_inputs: self.assertIsInstance(__a , np.ndarray ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values 
self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched UpperCamelCase = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def snake_case_ (self ) -> Tuple: # Initialize image_processing UpperCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a ) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor ) # Test not batched input UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched UpperCamelCase = image_processing(__a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def snake_case_ (self ) -> List[Any]: # with apply_OCR = True UpperCamelCase = LayoutLMvaImageProcessor() from datasets import load_dataset UpperCamelCase = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) UpperCamelCase = Image.open(ds[0]["file"] ).convert("RGB" ) UpperCamelCase = image_processing(__a , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 UpperCamelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", 
"Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 UpperCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 
6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , __a ) self.assertListEqual(encoding.boxes , __a ) # with apply_OCR = False UpperCamelCase = LayoutLMvaImageProcessor(apply_ocr=__a ) UpperCamelCase = image_processing(__a , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
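A brief sketch of the apply_ocr switch the test above ends on; the blank image is an assumption so the sketch needs no sample file or Tesseract install:

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # skip OCR: no words/boxes returned
image = Image.new("RGB", (640, 480), "white")
encoding = processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])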
153
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _lowerCamelCase ( unittest.TestCase ): def snake_case_ (self ) -> Tuple: UpperCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) UpperCamelCase = get_activation("gelu" ) self.assertTrue(torch.allclose(gelu_python(__a ) , torch_builtin(__a ) ) ) self.assertFalse(torch.allclose(gelu_python(__a ) , gelu_new(__a ) ) ) def snake_case_ (self ) -> Union[str, Any]: UpperCamelCase = torch.tensor([-1_00, -1, -0.1, 0, 0.1, 1.0, 1_00] ) UpperCamelCase = get_activation("gelu" ) UpperCamelCase = get_activation("gelu_10" ) UpperCamelCase = torch_builtin(__a ) UpperCamelCase = geluaa(__a ) UpperCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(__a ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def snake_case_ (self ) -> Any: get_activation("gelu" ) get_activation("gelu_10" ) get_activation("gelu_fast" ) get_activation("gelu_new" ) get_activation("gelu_python" ) get_activation("gelu_pytorch_tanh" ) get_activation("linear" ) get_activation("mish" ) get_activation("quick_gelu" ) get_activation("relu" ) get_activation("sigmoid" ) get_activation("silu" ) get_activation("swish" ) get_activation("tanh" ) with self.assertRaises(__a ): get_activation("bogus" ) with self.assertRaises(__a ): get_activation(__a ) def snake_case_ (self ) -> Optional[Any]: UpperCamelCase = get_activation("gelu" ) UpperCamelCase = 1 UpperCamelCase = get_activation("gelu" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__a ): UpperCamelCase = acta.a
153
1
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right corner of
    ``grid``, moving one step up, down, left or right at a time, never
    revisiting a cell and never entering a blocked cell (marked 1)."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
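Two quick checks of the search above: a 2x2 open grid has exactly two corner-to-corner paths, and blocking one cell removes one of them:

assert depth_first_search([[0, 0], [0, 0]], 0, 0, set()) == 2
assert depth_first_search([[0, 1], [0, 0]], 0, 0, set()) == 1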
334
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: UpperCamelCase = None UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} UpperCamelCase = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } UpperCamelCase = { '''camembert-base''': 512, } UpperCamelCase = '''▁''' class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = VOCAB_FILES_NAMES UpperCamelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : str = ["""input_ids""", """attention_mask"""] UpperCamelCase_ : int = CamembertTokenizer def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : int=None , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str="<s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : List[Any]="</s>" , SCREAMING_SNAKE_CASE_ : int="<s>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , SCREAMING_SNAKE_CASE_ : str="<pad>" , SCREAMING_SNAKE_CASE_ : List[str]="<mask>" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **SCREAMING_SNAKE_CASE_ : Any , ) -> Any: '''simple docstring''' A: Tuple = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token super().__init__( SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , ) A: Any = vocab_file A: Any = False if not self.vocab_file else True def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A: List[str] = [self.cls_token_id] A: List[str] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A: List[str] = [self.sep_token_id] A: Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE_ ): 
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return A: Dict = os.path.join( SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ ) return (out_vocab_file,)
334
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
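A short sketch of the validation path in the config above; both dicts are illustrative:

config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # accepted
try:
    LlamaConfig(rope_scaling={"type": "unknown", "factor": 2.0})
except ValueError as err:
    print(err)  # rejected by _rope_scaling_validation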
168
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType a_ : str = logging.get_logger(__name__) a_ : Tuple = { "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json", } class a ( _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = """layoutlmv3""" def __init__( self , __magic_name__=5_02_65 , __magic_name__=7_68 , __magic_name__=12 , __magic_name__=12 , __magic_name__=30_72 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=5_12 , __magic_name__=2 , __magic_name__=0.0_2 , __magic_name__=1e-5 , __magic_name__=1 , __magic_name__=0 , __magic_name__=2 , __magic_name__=10_24 , __magic_name__=1_28 , __magic_name__=1_28 , __magic_name__=True , __magic_name__=32 , __magic_name__=1_28 , __magic_name__=64 , __magic_name__=2_56 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=2_24 , __magic_name__=3 , __magic_name__=16 , __magic_name__=None , **__magic_name__ , ) -> Dict: super().__init__( vocab_size=__magic_name__ , hidden_size=__magic_name__ , num_hidden_layers=__magic_name__ , num_attention_heads=__magic_name__ , intermediate_size=__magic_name__ , hidden_act=__magic_name__ , hidden_dropout_prob=__magic_name__ , attention_probs_dropout_prob=__magic_name__ , max_position_embeddings=__magic_name__ , type_vocab_size=__magic_name__ , initializer_range=__magic_name__ , layer_norm_eps=__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , **__magic_name__ , ) _a = max_ad_position_embeddings _a = coordinate_size _a = shape_size _a = has_relative_attention_bias _a = rel_pos_bins _a = max_rel_pos _a = has_spatial_attention_bias _a = rel_ad_pos_bins _a = max_rel_ad_pos _a = text_embed _a = visual_embed _a = input_size _a = num_channels _a = patch_size _a = classifier_dropout class a ( _SCREAMING_SNAKE_CASE ): _lowerCAmelCase = version.parse("""1.12""" ) @property def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: # The order of inputs is different for question answering and sequence classification if self.task in ["question-answering", "sequence-classification"]: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) else: return OrderedDict( [ ('input_ids', {0: 'batch', 1: 'sequence'}), ('bbox', {0: 'batch', 1: 'sequence'}), ('attention_mask', {0: 'batch', 1: 'sequence'}), ('pixel_values', {0: 'batch', 1: 'num_channels'}), ] ) @property def __UpperCAmelCase ( self ) -> float: return 1e-5 @property def __UpperCAmelCase ( self ) -> int: return 12 def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , __magic_name__ = 3 , __magic_name__ = 40 , __magic_name__ = 40 , ) -> Mapping[str, Any]: setattr(processor.image_processor , 'apply_ocr' , __magic_name__ ) # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _a = compute_effective_axis_dimension( __magic_name__ , 
fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _a = processor.tokenizer.num_special_tokens_to_add(__magic_name__ ) _a = compute_effective_axis_dimension( __magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ ) # Generate dummy inputs according to compute batch and sequence _a = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size # Generate dummy bounding boxes _a = [[[48, 84, 73, 1_28]]] * batch_size # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) _a = self._generate_dummy_images(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) _a = dict( processor( __magic_name__ , text=__magic_name__ , boxes=__magic_name__ , return_tensors=__magic_name__ , ) ) return inputs
168
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
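A tiny sketch of what the lazy module above enables: symbols import from the package root and only resolve on first attribute access:

from transformers.models.llama import LlamaConfig

print(LlamaConfig().model_type)  # "llama"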
174
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED lowerCamelCase_ = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''allenai/led-base-16384''': 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def __lowercase ( ) -> Dict: '''simple docstring''' _A = ( list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) _A = bs[:] _A = 0 for b in range(2**8 ): if b not in bs: bs.append(__lowercase ) cs.append(2**8 + n ) n += 1 _A = [chr(__lowercase ) for n in cs] return dict(zip(__lowercase , __lowercase ) ) def __lowercase ( __lowercase ) -> Dict: '''simple docstring''' _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs class _UpperCAmelCase ( snake_case_ ): """simple docstring""" snake_case = VOCAB_FILES_NAMES snake_case = PRETRAINED_VOCAB_FILES_MAP snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case = ['''input_ids''', '''attention_mask'''] def __init__( self : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple="replace" , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : Any="<s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : Optional[int]="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Any=False , **__UpperCAmelCase : Any , ): '''simple docstring''' _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle: _A = json.load(__UpperCAmelCase ) _A = {v: k for k, v in self.encoder.items()} _A = errors # how to handle errors in decoding _A = bytes_to_unicode() _A = {v: k for k, v in self.byte_encoder.items()} with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle: _A = merges_handle.read().split("\n" )[1:-1] _A = [tuple(merge.split() ) for merge in bpe_merges] _A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) _A = {} _A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _A = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCAmelCase ( self : str ): '''simple docstring''' return len(self.encoder ) def lowerCAmelCase ( self : Tuple ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] ): '''simple docstring''' if token in self.cache: return self.cache[token] _A = tuple(__UpperCAmelCase ) _A = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: _A = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) ) if bigram not in self.bpe_ranks: break _A , _A = bigram _A = [] _A = 0 while i < len(__UpperCAmelCase ): try: _A = word.index(__UpperCAmelCase , __UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(__UpperCAmelCase ) _A = new_word if len(__UpperCAmelCase ) == 1: break else: _A = get_pairs(__UpperCAmelCase ) _A = " ".join(__UpperCAmelCase ) _A = word return word def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ): '''simple docstring''' _A = [] for token in re.findall(self.pat , __UpperCAmelCase ): _A = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(" " ) ) return bpe_tokens def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple ): '''simple docstring''' return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[int] ): '''simple docstring''' return self.decoder.get(__UpperCAmelCase ) def lowerCAmelCase ( self : str , __UpperCAmelCase : Dict ): '''simple docstring''' _A = "".join(__UpperCAmelCase ) _A = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ): '''simple docstring''' if not 
os.path.isdir(__UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return _A = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) _A = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" ) _A = 0 with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) _A = token_index writer.write(" ".join(__UpperCAmelCase ) + "\n" ) index += 1 return vocab_file, merge_file def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _A = [self.cls_token_id] _A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ): '''simple docstring''' _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict=False , **__UpperCAmelCase : Any ): '''simple docstring''' _A = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()): _A = " " + text return (text, kwargs) def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , ): '''simple docstring''' _A = super()._pad( encoded_inputs=__UpperCAmelCase , max_length=__UpperCAmelCase , padding_strategy=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , ) # Load from model defaults if return_attention_mask is None: _A = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _A = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
_A = len(encoded_inputs["global_attention_mask"] ) != len(__UpperCAmelCase ) if needs_to_be_padded: _A = len(__UpperCAmelCase ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _A = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": _A = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
174
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class __lowerCAmelCase ( unittest.TestCase ): def __init__( self : Any , A : str , A : Dict=7 , A : Dict=3 , A : List[str]=18 , A : int=30 , A : Any=4_00 , A : Union[str, Any]=True , A : List[str]=None , A : Dict=True , A : str=None , A : Dict=True , A : Optional[Any]=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , A : Optional[Any]=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , A : str=True , ) -> List[Any]: """simple docstring""" _UpperCAmelCase = size if size is not None else {'height': 2_24, 'width': 2_24} _UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18} _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = num_channels _UpperCAmelCase = image_size _UpperCAmelCase = min_resolution _UpperCAmelCase = max_resolution _UpperCAmelCase = do_resize _UpperCAmelCase = size _UpperCAmelCase = do_center_crop _UpperCAmelCase = crop_size _UpperCAmelCase = do_normalize _UpperCAmelCase = image_mean _UpperCAmelCase = image_std _UpperCAmelCase = do_convert_rgb def _lowerCamelCase ( self : Optional[int]) -> List[Any]: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def _lowerCamelCase ( self : List[str] , A : List[Any]=False , A : Union[str, Any]=False , A : List[Any]=False) -> str: """simple docstring""" assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: _UpperCAmelCase = [] for i in range(self.batch_size): image_inputs.append( np.random.randint( 2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta)) else: _UpperCAmelCase = [] for i in range(self.batch_size): _UpperCAmelCase , _UpperCAmelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2) image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta)) if not numpify and not torchify: # PIL expects the channel dimension as last dimension _UpperCAmelCase = [Image.fromarray(np.moveaxis(A , 0 , -1)) for x in image_inputs] if torchify: _UpperCAmelCase = [torch.from_numpy(A) for x in image_inputs] return image_inputs @require_torch @require_vision class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=A) @property def _lowerCamelCase ( self : List[str]) -> Any: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : Any) -> str: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(A , 'do_resize')) self.assertTrue(hasattr(A , 'size')) self.assertTrue(hasattr(A , 'do_center_crop')) self.assertTrue(hasattr(A , 'center_crop')) 
self.assertTrue(hasattr(A , 'do_normalize')) self.assertTrue(hasattr(A , 'image_mean')) self.assertTrue(hasattr(A , 'image_std')) self.assertTrue(hasattr(A , 'do_convert_rgb')) def _lowerCamelCase ( self : str) -> Dict: """simple docstring""" _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 2_24, 'width': 2_24}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) _UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {'shortest_edge': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) def _lowerCamelCase ( self : List[Any]) -> Any: """simple docstring""" pass def _lowerCamelCase ( self : Tuple) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A) for image in image_inputs: self.assertIsInstance(A , Image.Image) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _lowerCamelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A , numpify=A) for image in image_inputs: self.assertIsInstance(A , np.ndarray) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _lowerCamelCase ( self : int) -> Dict: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A , torchify=A) for image in image_inputs: self.assertIsInstance(A , torch.Tensor) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, 
self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) @require_torch @require_vision class __lowerCAmelCase ( A , unittest.TestCase ): UpperCamelCase = ChineseCLIPImageProcessor if is_vision_available() else None def _lowerCamelCase ( self : List[str]) -> str: """simple docstring""" _UpperCAmelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=A) _UpperCAmelCase = 3 @property def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self : Optional[int]) -> List[str]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(A , 'do_resize')) self.assertTrue(hasattr(A , 'size')) self.assertTrue(hasattr(A , 'do_center_crop')) self.assertTrue(hasattr(A , 'center_crop')) self.assertTrue(hasattr(A , 'do_normalize')) self.assertTrue(hasattr(A , 'image_mean')) self.assertTrue(hasattr(A , 'image_std')) self.assertTrue(hasattr(A , 'do_convert_rgb')) def _lowerCamelCase ( self : Any) -> Union[str, Any]: """simple docstring""" pass def _lowerCamelCase ( self : Any) -> Optional[int]: """simple docstring""" _UpperCAmelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images _UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=A) for image in image_inputs: self.assertIsInstance(A , Image.Image) # Test not batched input _UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _UpperCAmelCase = image_processing(A , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
339
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    # ``n`` may go at (row, column) only if it does not already appear in the
    # same row, column, or 3x3 box.
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    # Return the coordinates of the first empty cell (marked 0), if any.
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    # Classic backtracking: fill the first empty cell with each safe digit and
    # recurse; undo the placement when the recursion dead-ends.
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
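# Behavioral note (sketch, not part of the original file): `sudoku` fills the
# grid in place and returns the same object on success, so deep-copy first if
# the unsolved grid must be kept around.
from copy import deepcopy

grid_copy = deepcopy(initial_grid)
solved = sudoku(grid_copy)
assert solved is grid_copy                   # same object, mutated in place
assert all(0 not in row for row in solved)   # every cell filled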
339
1
"""simple docstring""" from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): a__ : Optional[int] = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"] @register_to_config def __init__( self : Tuple , _lowercase : int , _lowercase : int , _lowercase : Optional[int] = None , _lowercase : int = 5_02_57 , _lowercase : int = 10_24 , _lowercase : int = 7_68 , _lowercase : int = 12 , _lowercase : int = 12 , _lowercase : Optional[int] = None , _lowercase : str = "gelu_new" , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : float = 0.1 , _lowercase : float = 1E-5 , _lowercase : float = 0.02 , _lowercase : bool = True , _lowercase : bool = True , _lowercase : bool = False , _lowercase : bool = False , ): super().__init__() __UpperCAmelCase = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) __UpperCAmelCase = prefix_inner_dim __UpperCAmelCase = prefix_hidden_dim __UpperCAmelCase = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __UpperCAmelCase = ( nn.Linear(self.prefix_hidden_dim , _lowercase ) if self.prefix_hidden_dim is not None else nn.Identity() ) __UpperCAmelCase = GPTaConfig( vocab_size=_lowercase , n_positions=_lowercase , n_embd=_lowercase , n_layer=_lowercase , n_head=_lowercase , n_inner=_lowercase , activation_function=_lowercase , resid_pdrop=_lowercase , embd_pdrop=_lowercase , attn_pdrop=_lowercase , layer_norm_epsilon=_lowercase , initializer_range=_lowercase , scale_attn_weights=_lowercase , use_cache=_lowercase , scale_attn_by_inverse_layer_idx=_lowercase , reorder_and_upcast_attn=_lowercase , ) __UpperCAmelCase = GPTaLMHeadModel(_lowercase ) def a ( self : List[str] , _lowercase : torch.Tensor , _lowercase : torch.Tensor , _lowercase : Optional[torch.Tensor] = None , _lowercase : Optional[torch.Tensor] = None , ): __UpperCAmelCase = self.transformer.transformer.wte(_lowercase ) __UpperCAmelCase = self.encode_prefix(_lowercase ) __UpperCAmelCase = self.decode_prefix(_lowercase ) __UpperCAmelCase = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: __UpperCAmelCase = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) __UpperCAmelCase = torch.cat((dummy_token, input_ids) , dim=1 ) __UpperCAmelCase = self.transformer(inputs_embeds=_lowercase , labels=_lowercase , attention_mask=_lowercase ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def a ( self : List[Any] , _lowercase : int , _lowercase : torch.device ): return torch.zeros(_lowercase , self.prefix_length , dtype=torch.intaa , device=_lowercase ) def a ( self : str , _lowercase : List[str] ): return self.encode_prefix(_lowercase ) @torch.no_grad() def a ( self : Optional[Any] , _lowercase : Any , _lowercase : Union[str, Any] , _lowercase : Optional[int] ): __UpperCAmelCase = torch.split(_lowercase , 1 , dim=0 ) __UpperCAmelCase = [] __UpperCAmelCase = [] for feature in features: __UpperCAmelCase = self.decode_prefix(feature.to(_lowercase ) ) # back to the clip feature # Only support beam 
search for now __UpperCAmelCase , __UpperCAmelCase = self.generate_beam( input_embeds=_lowercase , device=_lowercase , eos_token_id=_lowercase ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) __UpperCAmelCase = torch.stack(_lowercase ) __UpperCAmelCase = torch.stack(_lowercase ) return generated_tokens, generated_seq_lengths @torch.no_grad() def a ( self : str , _lowercase : Any=None , _lowercase : Dict=None , _lowercase : Tuple=None , _lowercase : int = 5 , _lowercase : int = 67 , _lowercase : float = 1.0 , _lowercase : Optional[int] = None , ): __UpperCAmelCase = eos_token_id __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = torch.ones(_lowercase , device=_lowercase , dtype=torch.int ) __UpperCAmelCase = torch.zeros(_lowercase , device=_lowercase , dtype=torch.bool ) if input_embeds is not None: __UpperCAmelCase = input_embeds else: __UpperCAmelCase = self.transformer.transformer.wte(_lowercase ) for i in range(_lowercase ): __UpperCAmelCase = self.transformer(inputs_embeds=_lowercase ) __UpperCAmelCase = outputs.logits __UpperCAmelCase = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __UpperCAmelCase = logits.softmax(-1 ).log() if scores is None: __UpperCAmelCase , __UpperCAmelCase = logits.topk(_lowercase , -1 ) __UpperCAmelCase = generated.expand(_lowercase , *generated.shape[1:] ) __UpperCAmelCase , __UpperCAmelCase = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: __UpperCAmelCase = next_tokens else: __UpperCAmelCase = tokens.expand(_lowercase , *tokens.shape[1:] ) __UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 ) else: __UpperCAmelCase = -float(np.inf ) __UpperCAmelCase = 0 __UpperCAmelCase = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __UpperCAmelCase = scores_sum / seq_lengths[:, None] __UpperCAmelCase , __UpperCAmelCase = scores_sum_average.view(-1 ).topk(_lowercase , -1 ) __UpperCAmelCase = next_tokens // scores_sum.shape[1] __UpperCAmelCase = seq_lengths[next_tokens_source] __UpperCAmelCase = next_tokens % scores_sum.shape[1] __UpperCAmelCase = next_tokens.unsqueeze(1 ) __UpperCAmelCase = tokens[next_tokens_source] __UpperCAmelCase = torch.cat((tokens, next_tokens) , dim=1 ) __UpperCAmelCase = generated[next_tokens_source] __UpperCAmelCase = scores_sum_average * seq_lengths __UpperCAmelCase = is_stopped[next_tokens_source] __UpperCAmelCase = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) __UpperCAmelCase = torch.cat((generated, next_token_embed) , dim=1 ) __UpperCAmelCase = is_stopped + next_tokens.eq(_lowercase ).squeeze() if is_stopped.all(): break __UpperCAmelCase = scores / seq_lengths __UpperCAmelCase = scores.argsort(descending=_lowercase ) # tokens tensors are already padded to max_seq_length __UpperCAmelCase = [tokens[i] for i in order] __UpperCAmelCase = torch.stack(_lowercase , dim=0 ) __UpperCAmelCase = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
370
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class _UpperCAmelCase ( enum.Enum ): a__ : str = 0 a__ : List[Any] = 1 a__ : str = 2 @add_end_docstrings(_lowerCAmelCase ) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Dict = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n " def __init__( self : Optional[Any] , *_lowercase : Any , **_lowercase : Optional[int] ): super().__init__(*_lowercase , **_lowercase ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __UpperCAmelCase = None if self.model.config.prefix is not None: __UpperCAmelCase = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __UpperCAmelCase = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._sanitize_parameters(prefix=_lowercase , **self._forward_params ) __UpperCAmelCase = {**self._preprocess_params, **preprocess_params} __UpperCAmelCase = {**self._forward_params, **forward_params} def a ( self : Any , _lowercase : Optional[Any]=None , _lowercase : List[str]=None , _lowercase : int=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Union[str, Any]=None , _lowercase : List[Any]=None , **_lowercase : str , ): __UpperCAmelCase = {} if prefix is not None: __UpperCAmelCase = prefix if prefix: __UpperCAmelCase = self.tokenizer( _lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework ) __UpperCAmelCase = prefix_inputs['''input_ids'''].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected''' ''' [None, \'hole\']''' ) __UpperCAmelCase = handle_long_generation preprocess_params.update(_lowercase ) __UpperCAmelCase = generate_kwargs __UpperCAmelCase = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' ) if return_tensors is not None: raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' ) __UpperCAmelCase = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' ) __UpperCAmelCase = ReturnType.TENSORS if return_type is not None: __UpperCAmelCase = return_type if clean_up_tokenization_spaces is not None: __UpperCAmelCase = clean_up_tokenization_spaces if stop_sequence is not None: __UpperCAmelCase = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) if len(_lowercase ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. 
The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) __UpperCAmelCase = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def a ( self : Optional[int] , *_lowercase : Optional[int] , **_lowercase : Any ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'''add_space_before_punct_symbol''': True} ) return super()._parse_and_tokenize(*_lowercase , **_lowercase ) def __call__( self : List[str] , _lowercase : str , **_lowercase : Optional[Any] ): return super().__call__(_lowercase , **_lowercase ) def a ( self : Union[str, Any] , _lowercase : Any , _lowercase : Dict="" , _lowercase : Union[str, Any]=None , **_lowercase : Tuple ): __UpperCAmelCase = self.tokenizer( prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework ) __UpperCAmelCase = prompt_text if handle_long_generation == "hole": __UpperCAmelCase = inputs['''input_ids'''].shape[-1] if "max_new_tokens" in generate_kwargs: __UpperCAmelCase = generate_kwargs['''max_new_tokens'''] else: __UpperCAmelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('''We cannot infer how many new tokens are expected''' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __UpperCAmelCase = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( '''We cannot use `hole` to handle this generation the number of desired tokens exceeds the''' ''' models max length''' ) __UpperCAmelCase = inputs['''input_ids'''][:, -keep_length:] if "attention_mask" in inputs: __UpperCAmelCase = inputs['''attention_mask'''][:, -keep_length:] return inputs def a ( self : Union[str, Any] , _lowercase : List[str] , **_lowercase : Optional[int] ): __UpperCAmelCase = model_inputs['''input_ids'''] __UpperCAmelCase = model_inputs.get('''attention_mask''' , _lowercase ) # Allow empty prompts if input_ids.shape[1] == 0: __UpperCAmelCase = None __UpperCAmelCase = None __UpperCAmelCase = 1 else: __UpperCAmelCase = input_ids.shape[0] __UpperCAmelCase = model_inputs.pop('''prompt_text''' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__UpperCAmelCase = generate_kwargs.pop('''prefix_length''' , 0 ) if prefix_length > 0: __UpperCAmelCase = '''max_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].max_new_tokens is not None ) if not has_max_new_tokens: __UpperCAmelCase = generate_kwargs.get('''max_length''' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __UpperCAmelCase = '''min_new_tokens''' in generate_kwargs or ( '''generation_config''' in generate_kwargs and generate_kwargs['''generation_config'''].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __UpperCAmelCase = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase ) __UpperCAmelCase = generated_sequence.shape[0] if self.framework == "pt": __UpperCAmelCase = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __UpperCAmelCase = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def a ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Optional[int]=ReturnType.FULL_TEXT , _lowercase : List[str]=True ): __UpperCAmelCase = model_outputs['''generated_sequence'''][0] __UpperCAmelCase = model_outputs['''input_ids'''] __UpperCAmelCase = model_outputs['''prompt_text'''] __UpperCAmelCase = generated_sequence.numpy().tolist() __UpperCAmelCase = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __UpperCAmelCase = {'''generated_token_ids''': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __UpperCAmelCase = self.tokenizer.decode( _lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __UpperCAmelCase = 0 else: __UpperCAmelCase = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) ) if return_type == ReturnType.FULL_TEXT: __UpperCAmelCase = prompt_text + text[prompt_length:] else: __UpperCAmelCase = text[prompt_length:] __UpperCAmelCase = {'''generated_text''': all_text} records.append(_lowercase ) return records
86
0
def lowerCAmelCase_(number: int) -> bool:
    # Automorphic check: a number is automorphic when its square ends in the
    # number itself (e.g. 76**2 == 5776).
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
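# A few hand-checked calls to make the digit-by-digit loop above concrete; the
# generated name `lowerCAmelCase_` from the snippet is kept as-is (sketch only,
# not part of the original file):
assert lowerCAmelCase_(25)      # 25**2 = 625 ends in "25" -> automorphic
assert lowerCAmelCase_(76)      # 76**2 = 5776 ends in "76"
assert not lowerCAmelCase_(7)   # 7**2 = 49 does not end in "7"
assert not lowerCAmelCase_(-1)  # negative inputs are rejected outright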
149
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _a ( UpperCamelCase__): """simple docstring""" def UpperCAmelCase_ ( self: Dict ): '''simple docstring''' UpperCamelCase__: str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCamelCase , "embed_dim" ) ) self.parent.assertTrue(hasattr(__lowerCamelCase , "num_heads" ) ) class _a : """simple docstring""" def __init__( self: Tuple , __lowerCamelCase: List[str] , __lowerCamelCase: str=13 , __lowerCamelCase: Tuple=64 , __lowerCamelCase: List[Any]=3 , __lowerCamelCase: List[Any]=[16, 48, 96] , __lowerCamelCase: Union[str, Any]=[1, 3, 6] , __lowerCamelCase: Tuple=[1, 2, 10] , __lowerCamelCase: int=[7, 3, 3] , __lowerCamelCase: Dict=[4, 2, 2] , __lowerCamelCase: int=[2, 1, 1] , __lowerCamelCase: Dict=[2, 2, 2] , __lowerCamelCase: List[str]=[False, False, True] , __lowerCamelCase: str=[0.0, 0.0, 0.0] , __lowerCamelCase: Union[str, Any]=0.02 , __lowerCamelCase: str=1e-12 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Tuple=True , __lowerCamelCase: Union[str, Any]=2 , ): '''simple docstring''' UpperCamelCase__: Dict = parent UpperCamelCase__: Union[str, Any] = batch_size UpperCamelCase__: int = image_size UpperCamelCase__: Dict = patch_sizes UpperCamelCase__: Any = patch_stride UpperCamelCase__: Optional[int] = patch_padding UpperCamelCase__: Any = is_training UpperCamelCase__: Dict = use_labels UpperCamelCase__: List[str] = num_labels UpperCamelCase__: Tuple = num_channels UpperCamelCase__: int = embed_dim UpperCamelCase__: int = num_heads UpperCamelCase__: Dict = stride_kv UpperCamelCase__: Optional[int] = depth UpperCamelCase__: int = cls_token UpperCamelCase__: Optional[Any] = attention_drop_rate UpperCamelCase__: Tuple = initializer_range UpperCamelCase__: Dict = layer_norm_eps def UpperCAmelCase_ ( self: List[Any] ): '''simple docstring''' UpperCamelCase__: Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCamelCase__: Any = None if self.use_labels: # create a random int32 tensor of given shape UpperCamelCase__: Tuple = ids_tensor([self.batch_size] , self.num_labels ) UpperCamelCase__: List[Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase_ ( self: Any ): '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase_ ( self: int , __lowerCamelCase: Tuple , __lowerCamelCase: List[str] , 
__lowerCamelCase: Optional[Any] ): '''simple docstring''' UpperCamelCase__: str = TFCvtModel(config=__lowerCamelCase ) UpperCamelCase__: str = model(__lowerCamelCase , training=__lowerCamelCase ) UpperCamelCase__: Optional[Any] = (self.image_size, self.image_size) UpperCamelCase__ , UpperCamelCase__: Optional[Any] = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCamelCase__: Optional[Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCamelCase__: List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def UpperCAmelCase_ ( self: Dict , __lowerCamelCase: Optional[Any] , __lowerCamelCase: Union[str, Any] , __lowerCamelCase: Dict ): '''simple docstring''' UpperCamelCase__: int = self.num_labels UpperCamelCase__: Tuple = TFCvtForImageClassification(__lowerCamelCase ) UpperCamelCase__: Tuple = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase_ ( self: Union[str, Any] ): '''simple docstring''' UpperCamelCase__: Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__: Dict = config_and_inputs UpperCamelCase__: int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase): """simple docstring""" UpperCamelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () UpperCamelCase__ = ( {"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification} if is_tf_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def UpperCAmelCase_ ( self: int ): '''simple docstring''' UpperCamelCase__: Optional[Any] = TFCvtModelTester(self ) UpperCamelCase__: int = TFCvtConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=37 ) def UpperCAmelCase_ ( self: Tuple ): '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="Cvt does not output attentions" ) def UpperCAmelCase_ ( self: Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="Cvt does not use inputs_embeds" ) def UpperCAmelCase_ ( self: List[str] ): '''simple docstring''' pass @unittest.skip(reason="Cvt does not support input and output embeddings" ) def UpperCAmelCase_ ( self: Optional[int] ): '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) def UpperCAmelCase_ ( self: int ): '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." 
, ) @slow def UpperCAmelCase_ ( self: List[str] ): '''simple docstring''' super().test_keras_fit() @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" ) def UpperCAmelCase_ ( self: Tuple ): '''simple docstring''' UpperCamelCase__: int = tf.keras.mixed_precision.Policy("mixed_float16" ) tf.keras.mixed_precision.set_global_policy(__lowerCamelCase ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("float32" ) def UpperCAmelCase_ ( self: List[str] ): '''simple docstring''' UpperCamelCase__ , UpperCamelCase__: List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__: int = model_class(__lowerCamelCase ) UpperCamelCase__: Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCamelCase__: List[str] = [*signature.parameters.keys()] UpperCamelCase__: Optional[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def UpperCAmelCase_ ( self: Dict ): '''simple docstring''' def check_hidden_states_output(__lowerCamelCase: Union[str, Any] , __lowerCamelCase: Optional[int] , __lowerCamelCase: str ): UpperCamelCase__: Tuple = model_class(__lowerCamelCase ) UpperCamelCase__: Any = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) ) UpperCamelCase__: Optional[Any] = outputs.hidden_states UpperCamelCase__: str = len(self.model_tester.depth ) self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) UpperCamelCase__ , UpperCamelCase__: List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCamelCase__: int = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCamelCase__: Union[str, Any] = True check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) def UpperCAmelCase_ ( self: List[Any] ): '''simple docstring''' UpperCamelCase__: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCamelCase ) def UpperCAmelCase_ ( self: Tuple ): '''simple docstring''' UpperCamelCase__: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase ) @slow def UpperCAmelCase_ ( self: Optional[Any] ): '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__: List[Any] = TFCvtModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) def lowerCAmelCase_ ( ): UpperCamelCase__: Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class _a ( unittest.TestCase): """simple docstring""" @cached_property def UpperCAmelCase_ ( self: Union[str, Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCAmelCase_ ( self: List[Any] ): '''simple docstring''' UpperCamelCase__: Union[str, Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCamelCase__: str = 
self.default_image_processor UpperCamelCase__: List[str] = prepare_img() UpperCamelCase__: Tuple = image_processor(images=__lowerCamelCase , return_tensors="tf" ) # forward pass UpperCamelCase__: List[str] = model(**__lowerCamelCase ) # verify the logits UpperCamelCase__: Optional[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCamelCase ) UpperCamelCase__: Optional[Any] = tf.constant([0.9_285, 0.9_015, -0.3_150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowerCamelCase , atol=1e-4 ) )
149
1
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset snake_case__ : Union[str, Any] = '''bert-base-cased''' snake_case__ : List[str] = '''google/pegasus-xsum''' snake_case__ : int = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] snake_case__ : Tuple = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] snake_case__ : List[str] = '''patrickvonplaten/t5-tiny-random''' snake_case__ : Dict = '''sshleifer/bart-tiny-random''' snake_case__ : List[Any] = '''sshleifer/tiny-mbart''' snake_case__ : Dict = '''sshleifer/tiny-marian-en-de''' def _snake_case ( _snake_case : Any , _snake_case : str ): lowerCAmelCase : List[Any] = '\n'.join(__a ) Path(__a ).open('''w''' ).writelines(__a ) def _snake_case ( _snake_case : Union[str, Any] ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(__a , f'''{split}.source''' ) , __a ) _dump_articles(os.path.join(__a , f'''{split}.target''' ) , __a ) return tmp_dir class snake_case_( snake_case_ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any ): lowerCAmelCase : int = AutoTokenizer.from_pretrained(_A ) lowerCAmelCase : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase : Dict = max(len(tokenizer.encode(_A ) ) for a in ARTICLES ) lowerCAmelCase : str = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES ) lowerCAmelCase : str = 4 lowerCAmelCase : List[str] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase : Optional[int] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. lowerCAmelCase : Union[str, Any] = SeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , src_lang=_A , tgt_lang=_A , ) lowerCAmelCase : Optional[Any] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(_A , _A ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase : Union[str, Any] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(_A ) lowerCAmelCase : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase : List[str] = max(len(tokenizer.encode(_A ) ) for a in ARTICLES ) lowerCAmelCase : Dict = max(len(tokenizer.encode(_A ) ) for a in SUMMARIES ) lowerCAmelCase : str = 4 lowerCAmelCase : Tuple = LegacySeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=2_0 , max_target_length=_A , ) lowerCAmelCase : Union[str, Any] = DataLoader(_A , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Dict = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) lowerCAmelCase : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase : Optional[Any] = tmp_dir.joinpath('''train.source''' ).open().readlines() lowerCAmelCase : Optional[int] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(_A , _A , 1_2_8 , _A ) lowerCAmelCase : Optional[int] = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase : Optional[Any] = {x.name for x in save_dir.iterdir()} lowerCAmelCase : List[str] = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(_A ) < len(_A ) assert len(_A ) == 1 assert len(packed_examples[0] ) == sum(len(_A ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def lowerCamelCase__ ( self : Union[str, Any] ): if not FAIRSEQ_AVAILABLE: return lowerCAmelCase : str = self._get_dataset(max_len=6_4 ) lowerCAmelCase : Optional[int] = 6_4 lowerCAmelCase : List[Any] = ds.make_dynamic_sampler(_A , required_batch_size_multiple=_A ) lowerCAmelCase : Optional[int] = [len(_A ) for x in batch_sampler] assert len(set(_A ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(_A ) == len(_A ) # no dropped or added examples lowerCAmelCase : List[str] = DataLoader(_A , batch_sampler=_A , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase : List[Any] = [] lowerCAmelCase : Optional[int] = [] for batch in data_loader: lowerCAmelCase : str = batch['input_ids'].shape 
lowerCAmelCase : Tuple = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase : Optional[int] = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(_A ) if num_src_tokens > (max_tokens * 1.1): failures.append(_A ) assert num_src_per_batch[0] == max(_A ) if failures: raise AssertionError(F'''too many tokens in {len(_A )} batches''' ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Optional[Any] = self._get_dataset(max_len=5_1_2 ) lowerCAmelCase : Dict = 2 lowerCAmelCase : Dict = ds.make_sortish_sampler(_A , shuffle=_A ) lowerCAmelCase : Dict = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase : Union[str, Any] = DataLoader(_A , batch_size=_A , collate_fn=ds.collate_fn , num_workers=2 , sampler=_A ) lowerCAmelCase : List[Any] = tokenizer.pad_token_id def count_pad_tokens(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int="input_ids" ): return [batch[k].eq(_A ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(_A , k='''labels''' ) ) < sum(count_pad_tokens(_A , k='''labels''' ) ) assert sum(count_pad_tokens(_A ) ) < sum(count_pad_tokens(_A ) ) assert len(_A ) == len(_A ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Optional[int]=1_0_0_0 , UpperCamelCase_ : List[str]=1_2_8 ): if os.getenv('''USE_REAL_DATA''' , _A ): lowerCAmelCase : List[Any] = 'examples/seq2seq/wmt_en_ro' lowerCAmelCase : Optional[Any] = max_len * 2 * 6_4 if not Path(_A ).joinpath('''train.len''' ).exists(): save_len_file(_A , _A ) else: lowerCAmelCase : Union[str, Any] = 'examples/seq2seq/test_data/wmt_en_ro' lowerCAmelCase : List[str] = max_len * 4 save_len_file(_A , _A ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_A ) lowerCAmelCase : str = SeqaSeqDataset( _A , data_dir=_A , type_path='''train''' , max_source_length=_A , max_target_length=_A , n_obs=_A , ) return ds, max_tokens, tokenizer def lowerCamelCase__ ( self : int ): lowerCAmelCase : Optional[int] = self._get_dataset() lowerCAmelCase : List[str] = set(DistributedSortishSampler(_A , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=_A ) ) lowerCAmelCase : List[str] = set(DistributedSortishSampler(_A , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=_A ) ) assert idsa.intersection(_A ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A ) if tok_name == MBART_TINY: lowerCAmelCase : Optional[int] = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) lowerCAmelCase : Dict = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase : Optional[int] = SeqaSeqDataset( _A , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase : List[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(_A ) == 1 if tok_name == BART_TINY else len(_A ) == 0
350
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Any = { '''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class snake_case_( a__ ): __UpperCamelCase = '''vit_msn''' def __init__( self : Dict , UpperCamelCase_ : str=7_6_8 , UpperCamelCase_ : List[Any]=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : str=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu" , UpperCamelCase_ : List[Any]=0.0 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[Any]=1E-06 , UpperCamelCase_ : Tuple=2_2_4 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=True , **UpperCamelCase_ : Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Any = hidden_size lowerCAmelCase : Tuple = num_hidden_layers lowerCAmelCase : List[Any] = num_attention_heads lowerCAmelCase : Any = intermediate_size lowerCAmelCase : Dict = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : Tuple = initializer_range lowerCAmelCase : Union[str, Any] = layer_norm_eps lowerCAmelCase : Tuple = image_size lowerCAmelCase : List[str] = patch_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = qkv_bias
from math import log

from scipy.constants import Boltzmann, physical_constants

_UpperCAmelCase : Any = 300  # TEMPERATURE (unit = K)


def A ( lowercase , lowercase , lowercase , ) -> float:
    '''simple docstring'''
    if donor_conc <= 0:
        raise ValueError('Donor concentration should be positive' )
    elif acceptor_conc <= 0:
        raise ValueError('Acceptor concentration should be positive' )
    elif intrinsic_conc <= 0:
        raise ValueError('Intrinsic concentration should be positive' )
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            'Donor concentration should be greater than intrinsic concentration' )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            'Acceptor concentration should be greater than intrinsic concentration' )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
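# --- Hedged usage sketch (not part of the dumped sample above) --------------------------
# The helper computes the built-in voltage V_bi = (kT/q) * ln(Nd * Na / ni^2).  Assuming
# its parameters are (donor_conc, acceptor_conc, intrinsic_conc), as its body suggests, a
# call with silicon-like concentrations (in cm^-3; only the ratio matters inside the log)
# yields roughly 0.8 V:
v_bi = A(1e17, 1e17, 1.5e10)
print(f"built-in voltage: {v_bi:.2f} V")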
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : str , lowercase : List[Any] , lowercase : List[str] ) -> int:
    if index == r:
        for j in range(lowercase ):
            print(data[j] , end=" " )
        print(" " )
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    __snake_case : Union[str, Any] = arr[i]
    combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase__( lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] ) -> Optional[Any]:
    # A temporary array to store all combination one by one
    __snake_case : Tuple = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )


if __name__ == "__main__":
    # Driver code to check the function above
    _UpperCamelCase = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
'''simple docstring'''

import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file

# =================#
# UNet Conversion #
# =================#

SCREAMING_SNAKE_CASE_: List[Any] =[
    # (stable-diffusion, HF Diffusers)
    ('time_embed.0.weight', 'time_embedding.linear_1.weight'),
    ('time_embed.0.bias', 'time_embedding.linear_1.bias'),
    ('time_embed.2.weight', 'time_embedding.linear_2.weight'),
    ('time_embed.2.bias', 'time_embedding.linear_2.bias'),
    ('input_blocks.0.0.weight', 'conv_in.weight'),
    ('input_blocks.0.0.bias', 'conv_in.bias'),
    ('out.0.weight', 'conv_norm_out.weight'),
    ('out.0.bias', 'conv_norm_out.bias'),
    ('out.2.weight', 'conv_out.weight'),
    ('out.2.bias', 'conv_out.bias'),
]

SCREAMING_SNAKE_CASE_: Optional[Any] =[
    # (stable-diffusion, HF Diffusers)
    ('in_layers.0', 'norm1'),
    ('in_layers.2', 'conv1'),
    ('out_layers.0', 'norm2'),
    ('out_layers.3', 'conv2'),
    ('emb_layers.1', 'time_emb_proj'),
    ('skip_connection', 'conv_shortcut'),
]

SCREAMING_SNAKE_CASE_: Tuple =[]
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        SCREAMING_SNAKE_CASE_: List[str] =f"down_blocks.{i}.resnets.{j}."
        SCREAMING_SNAKE_CASE_: Optional[Any] =f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            SCREAMING_SNAKE_CASE_: List[str] =f"down_blocks.{i}.attentions.{j}."
            SCREAMING_SNAKE_CASE_: List[Any] =f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        SCREAMING_SNAKE_CASE_: Any =f"up_blocks.{i}.resnets.{j}."
        SCREAMING_SNAKE_CASE_: Dict =f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            SCREAMING_SNAKE_CASE_: Optional[Any] =f"up_blocks.{i}.attentions.{j}."
            SCREAMING_SNAKE_CASE_: Union[str, Any] =f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        SCREAMING_SNAKE_CASE_: Optional[Any] =f"down_blocks.{i}.downsamplers.0.conv."
        SCREAMING_SNAKE_CASE_: Optional[Any] =f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        SCREAMING_SNAKE_CASE_: List[str] =f"up_blocks.{i}.upsamplers.0."
        SCREAMING_SNAKE_CASE_: List[Any] =f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

SCREAMING_SNAKE_CASE_: List[Any] ='mid_block.attentions.0.'
SCREAMING_SNAKE_CASE_: int ='middle_block.1.'
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    SCREAMING_SNAKE_CASE_: Union[str, Any] =f"mid_block.resnets.{j}."
    SCREAMING_SNAKE_CASE_: int =f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def lowerCAmelCase_ ( snake_case_ : str ) -> List[Any]:
    '''simple docstring'''
    UpperCAmelCase_ = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        UpperCAmelCase_ = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
            UpperCAmelCase_ = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
        UpperCAmelCase_ = v
    UpperCAmelCase_ = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

SCREAMING_SNAKE_CASE_: Any =[
    # (stable-diffusion, HF Diffusers)
    ('nin_shortcut', 'conv_shortcut'),
    ('norm_out', 'conv_norm_out'),
    ('mid.attn_1.', 'mid_block.attentions.0.'),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        SCREAMING_SNAKE_CASE_: Any =f"encoder.down_blocks.{i}.resnets.{j}."
        SCREAMING_SNAKE_CASE_: str =f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        SCREAMING_SNAKE_CASE_: Any =f"down_blocks.{i}.downsamplers.0."
        SCREAMING_SNAKE_CASE_: List[Any] =f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        SCREAMING_SNAKE_CASE_: List[str] =f"up_blocks.{i}.upsamplers.0."
        SCREAMING_SNAKE_CASE_: Dict =f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        SCREAMING_SNAKE_CASE_: Optional[int] =f"decoder.up_blocks.{i}.resnets.{j}."
        SCREAMING_SNAKE_CASE_: Tuple =f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    SCREAMING_SNAKE_CASE_: str =f"mid_block.resnets.{i}."
    SCREAMING_SNAKE_CASE_: Optional[Any] =f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))


SCREAMING_SNAKE_CASE_: Optional[int] =[
    # (stable-diffusion, HF Diffusers)
    ('norm.', 'group_norm.'),
    ('q.', 'query.'),
    ('k.', 'key.'),
    ('v.', 'value.'),
    ('proj_out.', 'proj_attn.'),
]


def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Optional[int]:
    '''simple docstring'''
    return w.reshape(*w.shape , 1 , 1 )


def lowerCAmelCase_ ( snake_case_ : Union[str, Any] ) -> List[str]:
    '''simple docstring'''
    UpperCAmelCase_ = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
        UpperCAmelCase_ = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                UpperCAmelCase_ = v.replace(snake_case_ , snake_case_ )
            UpperCAmelCase_ = v
    UpperCAmelCase_ = {v: vae_state_dict[k] for k, v in mapping.items()}
    UpperCAmelCase_ = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"""mid.attn_1.{weight_name}.weight""" in k:
                print(f"""Reshaping {k} for SD format""" )
                UpperCAmelCase_ = reshape_weight_for_sd(snake_case_ )
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

SCREAMING_SNAKE_CASE_: Tuple =[
    # (stable-diffusion, HF Diffusers)
    ('resblocks.', 'text_model.encoder.layers.'),
    ('ln_1', 'layer_norm1'),
    ('ln_2', 'layer_norm2'),
    ('.c_fc.', '.fc1.'),
    ('.c_proj.', '.fc2.'),
    ('.attn', '.self_attn'),
    ('ln_final.', 'transformer.text_model.final_layer_norm.'),
    ('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
    ('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
SCREAMING_SNAKE_CASE_: Optional[int] ={re.escape(x[1]): x[0] for x in textenc_conversion_lst}
SCREAMING_SNAKE_CASE_: List[str] =re.compile('|'.join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
SCREAMING_SNAKE_CASE_: Union[str, Any] ={'q': 0, 'k': 1, 'v': 2}


def lowerCAmelCase_ ( snake_case_ : Any ) -> Optional[int]:
    '''simple docstring'''
    UpperCAmelCase_ = {}
    UpperCAmelCase_ = {}
    UpperCAmelCase_ = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight" )
            or k.endswith(".self_attn.k_proj.weight" )
            or k.endswith(".self_attn.v_proj.weight" )
        ):
            UpperCAmelCase_ = k[: -len(".q_proj.weight" )]
            UpperCAmelCase_ = k[-len("q_proj.weight" )]
            if k_pre not in capture_qkv_weight:
                UpperCAmelCase_ = [None, None, None]
            UpperCAmelCase_ = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias" )
            or k.endswith(".self_attn.k_proj.bias" )
            or k.endswith(".self_attn.v_proj.bias" )
        ):
            UpperCAmelCase_ = k[: -len(".q_proj.bias" )]
            UpperCAmelCase_ = k[-len("q_proj.bias" )]
            if k_pre not in capture_qkv_bias:
                UpperCAmelCase_ = [None, None, None]
            UpperCAmelCase_ = v
            continue

        UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ )
        UpperCAmelCase_ = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ )
        UpperCAmelCase_ = torch.cat(snake_case_ )

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        UpperCAmelCase_ = textenc_pattern.sub(lambda snake_case_ : protected[re.escape(m.group(0 ) )] , snake_case_ )
        UpperCAmelCase_ = torch.cat(snake_case_ )

    return new_state_dict


def lowerCAmelCase_ ( snake_case_ : Tuple ) -> Optional[int]:
    '''simple docstring'''
    return text_enc_dict


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE_: Union[str, Any] =argparse.ArgumentParser()

    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--use_safetensors', action='store_true', help='Save weights use safetensors, default is ckpt.'
    )

    SCREAMING_SNAKE_CASE_: Tuple =parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    SCREAMING_SNAKE_CASE_: Optional[Any] =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.safetensors')
    SCREAMING_SNAKE_CASE_: List[Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.safetensors')
    SCREAMING_SNAKE_CASE_: Optional[int] =osp.join(args.model_path, 'text_encoder', 'model.safetensors')

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        SCREAMING_SNAKE_CASE_: Optional[Any] =load_file(unet_path, device='cpu')
    else:
        SCREAMING_SNAKE_CASE_: Optional[int] =osp.join(args.model_path, 'unet', 'diffusion_pytorch_model.bin')
        SCREAMING_SNAKE_CASE_: Optional[int] =torch.load(unet_path, map_location='cpu')

    if osp.exists(vae_path):
        SCREAMING_SNAKE_CASE_: Optional[int] =load_file(vae_path, device='cpu')
    else:
        SCREAMING_SNAKE_CASE_: Union[str, Any] =osp.join(args.model_path, 'vae', 'diffusion_pytorch_model.bin')
        SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.load(vae_path, map_location='cpu')

    if osp.exists(text_enc_path):
        SCREAMING_SNAKE_CASE_: List[str] =load_file(text_enc_path, device='cpu')
    else:
        SCREAMING_SNAKE_CASE_: Optional[Any] =osp.join(args.model_path, 'text_encoder', 'pytorch_model.bin')
        SCREAMING_SNAKE_CASE_: List[Any] =torch.load(text_enc_path, map_location='cpu')

    # Convert the UNet model
    SCREAMING_SNAKE_CASE_: Tuple =convert_unet_state_dict(unet_state_dict)
    SCREAMING_SNAKE_CASE_: List[str] ={'model.diffusion_model.' + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    SCREAMING_SNAKE_CASE_: Dict =convert_vae_state_dict(vae_state_dict)
    SCREAMING_SNAKE_CASE_: Union[str, Any] ={'first_stage_model.' + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    SCREAMING_SNAKE_CASE_: Optional[int] ='text_model.encoder.layers.22.layer_norm2.bias' in text_enc_dict

    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        SCREAMING_SNAKE_CASE_: Tuple ={'transformer.' + k: v for k, v in text_enc_dict.items()}
        SCREAMING_SNAKE_CASE_: Any =convert_text_enc_state_dict_vaa(text_enc_dict)
        SCREAMING_SNAKE_CASE_: Optional[Any] ={'cond_stage_model.model.' + k: v for k, v in text_enc_dict.items()}
    else:
        SCREAMING_SNAKE_CASE_: Dict =convert_text_enc_state_dict(text_enc_dict)
        SCREAMING_SNAKE_CASE_: Any ={'cond_stage_model.transformer.' + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    SCREAMING_SNAKE_CASE_: Any ={**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        SCREAMING_SNAKE_CASE_: Any ={k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        SCREAMING_SNAKE_CASE_: Any ={'state_dict': state_dict}
        torch.save(state_dict, args.checkpoint_path)
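# --- Hedged usage note (not part of the dumped sample above) ----------------------------
# The conversion script is CLI-driven; a typical invocation (the script file name and the
# paths are placeholders, not taken from the sample) would be:
#
#   python convert_diffusers_to_sd.py \
#       --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors \
#       --half --use_safetensors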
'''simple docstring'''

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)

SCREAMING_SNAKE_CASE_: Tuple ={
    'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}


class __A ( UpperCamelCase__ , UpperCamelCase__ ):
    a__ : Optional[Any] = """resnet"""
    a__ : Tuple = ["""basic""", """bottleneck"""]

    def __init__(self : List[Any] , __a : Any=3 , __a : Dict=64 , __a : Union[str, Any]=[256, 512, 1024, 2048] , __a : str=[3, 4, 6, 3] , __a : Optional[Any]="bottleneck" , __a : Tuple="relu" , __a : int=False , __a : Optional[int]=None , __a : str=None , **__a : Dict , ):
        super().__init__(**__a )
        if layer_type not in self.layer_types:
            raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
        UpperCAmelCase_ = num_channels
        UpperCAmelCase_ = embedding_size
        UpperCAmelCase_ = hidden_sizes
        UpperCAmelCase_ = depths
        UpperCAmelCase_ = layer_type
        UpperCAmelCase_ = hidden_act
        UpperCAmelCase_ = downsample_in_first_stage
        UpperCAmelCase_ = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(__a ) + 1 )]
        UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
            out_features=__a , out_indices=__a , stage_names=self.stage_names )


class __A ( UpperCamelCase__ ):
    a__ : int = version.parse("""1.11""" )

    @property
    def _lowercase (self : Optional[int] ):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def _lowercase (self : str ):
        return 1E-3
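# --- Hedged aside (not part of the dumped sample above) ---------------------------------
# The sample obfuscates a ResNet configuration plus its ONNX config.  A sketch of the
# upstream equivalent, assuming `transformers.ResNetConfig`; `out_features` picks which of
# the named stages a backbone returns:
from transformers import ResNetConfig

resnet_config = ResNetConfig(depths=[3, 4, 6, 3], out_features=["stage2", "stage4"])
print(resnet_config.out_features)  # -> ['stage2', 'stage4']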
'''simple docstring'''

import torch

from diffusers import StableDiffusionPipeline


lowerCamelCase :Tuple = '''path-to-your-trained-model'''
lowerCamelCase :Optional[int] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')

lowerCamelCase :Optional[int] = '''A photo of sks dog in a bucket'''
lowerCamelCase :List[Any] = pipe(prompt, num_inference_steps=5_0, guidance_scale=7.5).images[0]

image.save('''dog-bucket.png''')
'''simple docstring'''

import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


lowerCamelCase :Any = logging.get_logger(__name__)  # pylint: disable=invalid-name


class _lowerCAmelCase ( __UpperCAmelCase ):
    def __init__(self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ):
        super().__init__()
        self.register_modules(
            vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , unet=lowercase , scheduler=lowercase , safety_checker=lowercase , feature_extractor=lowercase , )

    def _a (self , lowercase = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            A_ : int = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowercase )

    def _a (self ):
        self.enable_attention_slicing(lowercase )

    @torch.no_grad()
    def __call__(self , lowercase , lowercase = 512 , lowercase = 512 , lowercase = 50 , lowercase = 7.5 , lowercase = None , lowercase = 1 , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , lowercase = None , **lowercase , ):
        if isinstance(lowercase , lowercase ):
            A_ : Union[str, Any] = 1
        elif isinstance(lowercase , lowercase ):
            A_ : Any = len(lowercase )
        else:
            raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(lowercase )}' )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                F' {type(lowercase )}.' )

        # get prompt text embeddings
        A_ : Optional[Any] = self.tokenizer(
            lowercase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        A_ : Dict = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            A_ : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
            A_ : Dict = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            A_ : Any = self.text_encoder(text_input_ids.to(self.device ) )[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        A_, A_, A_ : Tuple = text_embeddings.shape
        A_ : Optional[Any] = text_embeddings.repeat(1 , lowercase , 1 )
        A_ : Any = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase , -1 )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        A_ : List[str] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            A_ : List[str]
            if negative_prompt is None:
                A_ : Optional[int] = [""""""]
            elif type(lowercase ) is not type(lowercase ):
                raise TypeError(
                    F'`negative_prompt` should be the same type to `prompt`, but got {type(lowercase )} !='
                    F' {type(lowercase )}.' )
            elif isinstance(lowercase , lowercase ):
                A_ : Dict = [negative_prompt]
            elif batch_size != len(lowercase ):
                raise ValueError(
                    F'`negative_prompt`: {negative_prompt} has batch size {len(lowercase )}, but `prompt`:'
                    F' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    """ the batch size of `prompt`.""" )
            else:
                A_ : Dict = negative_prompt

            A_ : int = text_input_ids.shape[-1]
            A_ : List[Any] = self.tokenizer(
                lowercase , padding="""max_length""" , max_length=lowercase , truncation=lowercase , return_tensors="""pt""" , )
            A_ : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            A_ : Optional[Any] = uncond_embeddings.shape[1]
            A_ : str = uncond_embeddings.repeat(lowercase , lowercase , 1 )
            A_ : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            A_ : Dict = torch.cat([uncond_embeddings, text_embeddings] )

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        A_ : int = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        A_ : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        A_ : Dict = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                A_ : Tuple = torch.randn(
                    lowercase , generator=lowercase , device="""cpu""" , dtype=lowercase ).to(self.device )
                A_ : int = torch.randn(lowercase , generator=lowercase , device="""cpu""" , dtype=lowercase ).to(
                    self.device )
            else:
                A_ : int = torch.randn(
                    lowercase , generator=lowercase , device=self.device , dtype=lowercase )
                A_ : str = torch.randn(lowercase , generator=lowercase , device=self.device , dtype=lowercase )
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
            A_ : str = latents_reference.to(self.device )
            A_ : Tuple = latents.to(self.device )

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        A_ : Optional[int] = (latents_shape[3] - latents_shape_reference[3]) // 2
        A_ : Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
        A_ : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        A_ : int = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        A_ : Optional[Any] = 0 if dx < 0 else dx
        A_ : Optional[Any] = 0 if dy < 0 else dy
        A_ : Optional[int] = max(-dx , 0 )
        A_ : List[str] = max(-dy , 0 )
        # import pdb
        # pdb.set_trace()
        A_ : str = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(lowercase )

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        A_ : Any = self.scheduler.timesteps.to(self.device )

        # scale the initial noise by the standard deviation required by the scheduler
        A_ : Tuple = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        A_ : Dict = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        A_ : Any = {}
        if accepts_eta:
            A_ : Optional[int] = eta

        for i, t in enumerate(self.progress_bar(lowercase ) ):
            # expand the latents if we are doing classifier free guidance
            A_ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            A_ : Tuple = self.scheduler.scale_model_input(lowercase , lowercase )

            # predict the noise residual
            A_ : List[str] = self.unet(lowercase , lowercase , encoder_hidden_states=lowercase ).sample

            # perform guidance
            if do_classifier_free_guidance:
                A_, A_ : str = noise_pred.chunk(2 )
                A_ : Optional[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            A_ : List[str] = self.scheduler.step(lowercase , lowercase , lowercase , **lowercase ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase , lowercase , lowercase )

        A_ : List[str] = 1 / 0.1_82_15 * latents
        A_ : List[str] = self.vae.decode(lowercase ).sample

        A_ : Optional[int] = (image / 2 + 0.5).clamp(0 , 1 )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        A_ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()

        if self.safety_checker is not None:
            A_ : Union[str, Any] = self.feature_extractor(self.numpy_to_pil(lowercase ) , return_tensors="""pt""" ).to(
                self.device )
            A_, A_ : Optional[int] = self.safety_checker(
                images=lowercase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
        else:
            A_ : Tuple = None

        if output_type == "pil":
            A_ : Tuple = self.numpy_to_pil(lowercase )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=lowercase , nsfw_content_detected=lowercase )
__UpperCAmelCase : int = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
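# --- Hedged usage sketch (not part of the dumped sample above) --------------------------
# The re-exports above form the public feature-type API of `datasets`; a typical schema
# declaration (the column names here are illustrative) looks like:
from datasets import ClassLabel, Features, Value

schema = Features({
    "text": Value("string"),
    "label": ClassLabel(names=["negative", "positive"]),
})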
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class UpperCAmelCase_ ( _a):
    '''simple docstring'''

    def __init__( self , *__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE ):
        """simple docstring"""
        super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
        UpperCamelCase : Any = eval_examples
        UpperCamelCase : Optional[Any] = post_process_function

    def _lowercase ( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "eval" ):
        """simple docstring"""
        UpperCamelCase : int = self.eval_dataset if eval_dataset is None else eval_dataset
        UpperCamelCase : int = self.get_eval_dataloader(__SCREAMING_SNAKE_CASE )
        UpperCamelCase : str = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase : Any = self.compute_metrics
        UpperCamelCase : List[Any] = None
        UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCamelCase : Dict = time.time()
        try:
            UpperCamelCase : str = eval_loop(
                __SCREAMING_SNAKE_CASE , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
        finally:
            UpperCamelCase : Union[str, Any] = compute_metrics
        UpperCamelCase : Any = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions )
            UpperCamelCase : Optional[Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(f"""{metric_key_prefix}_""" ):
                    UpperCamelCase : Dict = metrics.pop(__SCREAMING_SNAKE_CASE )
            metrics.update(output.metrics )
        else:
            UpperCamelCase : List[Any] = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(__SCREAMING_SNAKE_CASE )

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )

        UpperCamelCase : Any = self.callback_handler.on_evaluate(self.args , self.state , self.control , __SCREAMING_SNAKE_CASE )
        return metrics

    def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE = "test" ):
        """simple docstring"""
        UpperCamelCase : Tuple = self.get_test_dataloader(__SCREAMING_SNAKE_CASE )

        # Temporarily disable metric computation, we will do it in the loop here.
        UpperCamelCase : Union[str, Any] = self.compute_metrics
        UpperCamelCase : Tuple = None
        UpperCamelCase : str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        UpperCamelCase : Optional[int] = time.time()
        try:
            UpperCamelCase : int = eval_loop(
                __SCREAMING_SNAKE_CASE , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__SCREAMING_SNAKE_CASE , metric_key_prefix=__SCREAMING_SNAKE_CASE , )
        finally:
            UpperCamelCase : int = compute_metrics
        UpperCamelCase : Dict = self.args.eval_batch_size * self.args.world_size
        if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
            start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
        output.metrics.update(
            speed_metrics(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        UpperCamelCase : Dict = self.post_process_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , output.predictions , '''predict''' )
        UpperCamelCase : Union[str, Any] = self.compute_metrics(__SCREAMING_SNAKE_CASE )

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(f"""{metric_key_prefix}_""" ):
                UpperCamelCase : Any = metrics.pop(__SCREAMING_SNAKE_CASE )
        metrics.update(output.metrics )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase :
    def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
        '''simple docstring'''
        snake_case_ : Any = name
        snake_case_ : int = value
        snake_case_ : Optional[int] = weight

    def __repr__(self ) -> Dict:
        '''simple docstring'''
        return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def lowerCamelCase (self ) -> Union[str, Any]:
        '''simple docstring'''
        return self.value

    def lowerCamelCase (self ) -> List[str]:
        '''simple docstring'''
        return self.name

    def lowerCamelCase (self ) -> Optional[Any]:
        '''simple docstring'''
        return self.weight

    def lowerCamelCase (self ) -> Tuple:
        '''simple docstring'''
        return self.value / self.weight


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
    """simple docstring"""
    snake_case_ : Tuple = []
    for i in range(len(_UpperCamelCase ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu


def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str:
    """simple docstring"""
    snake_case_ : Union[str, Any] = sorted(_UpperCamelCase , key=_UpperCamelCase , reverse=_UpperCamelCase )
    snake_case_ : Any = []
    snake_case_ , snake_case_ : List[Any] = 0.0, 0.0
    for i in range(len(_UpperCamelCase ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def lowerCamelCase_ ( ) -> Tuple:
    """simple docstring"""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
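# --- Hedged usage sketch (not part of the dumped sample above) --------------------------
# In this obfuscated copy the item class and both module-level helpers share mangled names
# (the later `lowerCamelCase_` definitions shadow the earlier ones), so the sketch below
# uses assumed upstream names -- `Things` for the item class, `build_menu` for the menu
# builder, and `greedy` for the value-density selector -- purely for illustration:
menu = build_menu(["burger", "pizza", "salad"], [80, 100, 30], [40, 60, 10])
taken, total_value = greedy(menu, 100, Things.get_value)  # items picked under a budget of 100
print(taken, total_value)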
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class __lowerCAmelCase ( _a ):
    lowerCamelCase_ : Any = '''megatron-bert'''

    def __init__(self , __magic_name__=2_9056 , __magic_name__=1024 , __magic_name__=24 , __magic_name__=16 , __magic_name__=4096 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=1e-12 , __magic_name__=0 , __magic_name__="absolute" , __magic_name__=True , **__magic_name__ , ) -> Any:
        '''simple docstring'''
        super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )

        snake_case_ : Union[str, Any] = vocab_size
        snake_case_ : Dict = hidden_size
        snake_case_ : Dict = num_hidden_layers
        snake_case_ : Optional[int] = num_attention_heads
        snake_case_ : int = hidden_act
        snake_case_ : List[str] = intermediate_size
        snake_case_ : Dict = hidden_dropout_prob
        snake_case_ : str = attention_probs_dropout_prob
        snake_case_ : str = max_position_embeddings
        snake_case_ : Any = type_vocab_size
        snake_case_ : int = initializer_range
        snake_case_ : int = layer_norm_eps
        snake_case_ : List[str] = position_embedding_type
        snake_case_ : Dict = use_cache
import os
from typing import Dict, List, Tuple, TypeVar, Union


SCREAMING_SNAKE_CASE__ = TypeVar("""T""")

SCREAMING_SNAKE_CASE__ = Union[List[T], Tuple[T, ...]]
SCREAMING_SNAKE_CASE__ = Union[T, List[T], Dict[str, T]]
SCREAMING_SNAKE_CASE__ = Union[str, bytes, os.PathLike]
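# --- Hedged aside (not part of the dumped sample above) ---------------------------------
# Upstream these four assignments define the aliases T, ListLike, NestedDataStructureLike
# and PathLike; the obfuscation collapses them onto a single name.  Assuming the upstream
# names, a signature would use them like this (illustrative only):
#
#   def save_payload(path: PathLike, payload: NestedDataStructureLike[int]) -> None:
#       ...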
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock

import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset

from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType


class __lowerCamelCase ( snake_case_ ):
    """simple docstring"""

    def __init__( self , UpperCAmelCase ) -> Any:
        '''simple docstring'''
        lowercase_ = data

    def __iter__( self ) -> List[str]:
        '''simple docstring'''
        for element in self.data:
            yield element


def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any]=True ):
    '''simple docstring'''
    lowercase_ = Accelerator(even_batches=__lowerCamelCase )
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: bool = False ):
    '''simple docstring'''
    if iterable:
        lowercase_ = DummyIterableDataset(torch.as_tensor(range(__lowerCamelCase ) ) )
    else:
        lowercase_ = TensorDataset(torch.as_tensor(range(__lowerCamelCase ) ) )

    lowercase_ = DataLoader(__lowerCamelCase , batch_size=__lowerCamelCase )
    lowercase_ = accelerator.prepare(__lowerCamelCase )

    return dl


def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Accelerator , __lowerCamelCase: int , __lowerCamelCase: int , __lowerCamelCase: List[int] , __lowerCamelCase: List[int] , ):
    '''simple docstring'''
    lowercase_ = create_dataloader(accelerator=__lowerCamelCase , dataset_size=__lowerCamelCase , batch_size=__lowerCamelCase )
    lowercase_ = [len(batch[0] ) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def SCREAMING_SNAKE_CASE_ ( ):
    '''simple docstring'''
    lowercase_ = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        __lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        __lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )


def SCREAMING_SNAKE_CASE_ ( ):
    '''simple docstring'''
    lowercase_ = create_accelerator(even_batches=__lowerCamelCase )

    verify_dataloader_batch_sizes(
        __lowerCamelCase , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )

    verify_dataloader_batch_sizes(
        __lowerCamelCase , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )


def SCREAMING_SNAKE_CASE_ ( ):
    '''simple docstring'''
    lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
    lowercase_ = torch.nn.Linear(1 , 1 )
    lowercase_ = accelerator.prepare(__lowerCamelCase )
    lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
    lowercase_ = []

    with accelerator.join_uneven_inputs([ddp_model] ):
        for batch_idx, batch in enumerate(__lowerCamelCase ):
            lowercase_ = ddp_model(batch[0].float() )
            lowercase_ = output.sum()
            loss.backward()
            batch_idxs.append(__lowerCamelCase )

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
    '''simple docstring'''
    with warnings.catch_warnings(record=__lowerCamelCase ) as w:
        with accelerator.join_uneven_inputs([Mock()] ):
            pass

        assert issubclass(w[-1].category , __lowerCamelCase )
        assert "only supported for multi-GPU" in str(w[-1].message )


def SCREAMING_SNAKE_CASE_ ( ):
    '''simple docstring'''
    lowercase_ = True
    lowercase_ = False
    lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
    lowercase_ = torch.nn.Linear(1 , 1 )
    lowercase_ = accelerator.prepare(__lowerCamelCase )
    lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )
    lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )

    with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
        lowercase_ = train_dl.batch_sampler.even_batches
        lowercase_ = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def SCREAMING_SNAKE_CASE_ ( ):
    '''simple docstring'''
    lowercase_ = True
    lowercase_ = False
    lowercase_ = create_accelerator(even_batches=__lowerCamelCase )
    lowercase_ = torch.nn.Linear(1 , 1 )
    lowercase_ = accelerator.prepare(__lowerCamelCase )
    create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )
    lowercase_ = create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 )

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore" )
        try:
            with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
                lowercase_ = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def SCREAMING_SNAKE_CASE_ ( ):
    '''simple docstring'''
    lowercase_ = create_accelerator()
    lowercase_ = torch.nn.Linear(1 , 1 )
    lowercase_ = accelerator.prepare(__lowerCamelCase )
    create_dataloader(__lowerCamelCase , dataset_size=3 , batch_size=1 , iterable=__lowerCamelCase )

    with warnings.catch_warnings(record=__lowerCamelCase ) as w:
        with accelerator.join_uneven_inputs([ddp_model] , even_batches=__lowerCamelCase ):
            pass

        assert issubclass(w[-1].category , __lowerCamelCase )
        assert "only supported for map-style datasets" in str(w[-1].message )


def SCREAMING_SNAKE_CASE_ ( ):
    '''simple docstring'''
    lowercase_ = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes" )
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled" )
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs" )
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs" )
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types" )
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders" )
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning" )
    lowercase_ = accelerator.state.distributed_type
    lowercase_ = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(__lowerCamelCase )
    lowercase_ = original_state


if __name__ == "__main__":
    main()
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module

import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask


__lowerCamelCase : Tuple = logging.getLogger(__name__)


class __snake_case ( lowerCamelCase_ ):
    lowerCAmelCase_ = "token-classification"

    def __init__( self : Optional[int] , _lowercase : Union[str, Any] ):
        """simple docstring"""
        if type(_lowercase ) == dict:
            SCREAMING_SNAKE_CASE__ = Namespace(**_lowercase )
        SCREAMING_SNAKE_CASE__ = import_module("""tasks""" )
        try:
            SCREAMING_SNAKE_CASE__ = getattr(_lowercase , hparams.task_type )
            SCREAMING_SNAKE_CASE__ = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" )
        SCREAMING_SNAKE_CASE__ = self.token_classification_task.get_labels(hparams.labels )
        SCREAMING_SNAKE_CASE__ = CrossEntropyLoss().ignore_index
        super().__init__(_lowercase , len(self.labels ) , self.mode )

    def __a ( self : Optional[Any] , **_lowercase : str ):
        """simple docstring"""
        return self.model(**_lowercase )

    def __a ( self : Any , _lowercase : Dict , _lowercase : Optional[int] ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            SCREAMING_SNAKE_CASE__ = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids

        SCREAMING_SNAKE_CASE__ = self(**_lowercase )
        SCREAMING_SNAKE_CASE__ = outputs[0]
        # tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss}

    def __a ( self : List[Any] ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self.hparams
        for mode in ["train", "dev", "test"]:
            SCREAMING_SNAKE_CASE__ = self._feature_file(_lowercase )
            if os.path.exists(_lowercase ) and not args.overwrite_cache:
                logger.info("""Loading features from cached file %s""" , _lowercase )
                SCREAMING_SNAKE_CASE__ = torch.load(_lowercase )
            else:
                logger.info("""Creating features from dataset file at %s""" , args.data_dir )
                SCREAMING_SNAKE_CASE__ = self.token_classification_task.read_examples_from_file(args.data_dir , _lowercase )
                SCREAMING_SNAKE_CASE__ = self.token_classification_task.convert_examples_to_features(
                    _lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
                logger.info("""Saving features into cached file %s""" , _lowercase )
                torch.save(_lowercase , _lowercase )

    def __a ( self : str , _lowercase : int , _lowercase : int , _lowercase : bool = False ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = self._feature_file(_lowercase )
        logger.info("""Loading features from cached file %s""" , _lowercase )
        SCREAMING_SNAKE_CASE__ = torch.load(_lowercase )
        SCREAMING_SNAKE_CASE__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        SCREAMING_SNAKE_CASE__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        if features[0].token_type_ids is not None:
            SCREAMING_SNAKE_CASE__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        else:
            SCREAMING_SNAKE_CASE__ = torch.tensor([0 for f in features] , dtype=torch.long )
            # HACK(we will not use this anymore soon)
        SCREAMING_SNAKE_CASE__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
        return DataLoader(
            TensorDataset(_lowercase , _lowercase , _lowercase , _lowercase ) , batch_size=_lowercase )

    def __a ( self : Union[str, Any] , _lowercase : int , _lowercase : Tuple ):
        """simple docstring"""
        """Compute validation""" ""
        SCREAMING_SNAKE_CASE__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
        if self.config.model_type != "distilbert":
            SCREAMING_SNAKE_CASE__ = (
                batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            )  # XLM and RoBERTa don"t use token_type_ids
        SCREAMING_SNAKE_CASE__ = self(**_lowercase )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = outputs[:2]
        SCREAMING_SNAKE_CASE__ = logits.detach().cpu().numpy()
        SCREAMING_SNAKE_CASE__ = inputs["""labels"""].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def __a ( self : Dict , _lowercase : str ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
        SCREAMING_SNAKE_CASE__ = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
        SCREAMING_SNAKE_CASE__ = np.argmax(_lowercase , axis=2 )
        SCREAMING_SNAKE_CASE__ = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
        SCREAMING_SNAKE_CASE__ = dict(enumerate(self.labels ) )
        SCREAMING_SNAKE_CASE__ = [[] for _ in range(out_label_ids.shape[0] )]
        SCREAMING_SNAKE_CASE__ = [[] for _ in range(out_label_ids.shape[0] )]
        for i in range(out_label_ids.shape[0] ):
            for j in range(out_label_ids.shape[1] ):
                if out_label_ids[i, j] != self.pad_token_label_id:
                    out_label_list[i].append(label_map[out_label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        SCREAMING_SNAKE_CASE__ = {
            """val_loss""": val_loss_mean,
            """accuracy_score""": accuracy_score(_lowercase , _lowercase ),
            """precision""": precision_score(_lowercase , _lowercase ),
            """recall""": recall_score(_lowercase , _lowercase ),
            """f1""": fa_score(_lowercase , _lowercase ),
        }
        SCREAMING_SNAKE_CASE__ = dict(results.items() )
        SCREAMING_SNAKE_CASE__ = results
        return ret, preds_list, out_label_list

    def __a ( self : Union[str, Any] , _lowercase : Optional[Any] ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._eval_end(_lowercase )
        SCREAMING_SNAKE_CASE__ = ret["""log"""]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def __a ( self : Optional[int] , _lowercase : Any ):
        """simple docstring"""
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._eval_end(_lowercase )
        # Converting to the dict required by pl
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
        # pytorch_lightning/trainer/logging.py#L139
        SCREAMING_SNAKE_CASE__ = ret["""log"""]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def __a ( _lowercase : Optional[Any] , _lowercase : Optional[int] ):
        """simple docstring"""
        BaseTransformer.add_model_specific_args(_lowercase , _lowercase )
        parser.add_argument(
            """--task_type""" , default="""NER""" , type=_lowercase , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
        parser.add_argument(
            """--max_seq_length""" , default=1_28 , type=_lowercase , help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ) , )
        parser.add_argument(
            """--labels""" , default="""""" , type=_lowercase , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
        parser.add_argument(
            """--gpus""" , default=0 , type=_lowercase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
        parser.add_argument(
            """--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
        return parser


if __name__ == "__main__":
    __lowerCamelCase : str = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    __lowerCamelCase : List[str] = NERTransformer.add_model_specific_args(parser, os.getcwd())
    __lowerCamelCase : Any = parser.parse_args()
    __lowerCamelCase : Dict = NERTransformer(args)
    __lowerCamelCase : Optional[Any] = generic_train(model, args)

    if args.do_predict:
        # See https://github.com/huggingface/transformers/issues/3159
        # pl use this default format to create a checkpoint:
        # https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
        # /pytorch_lightning/callbacks/model_checkpoint.py#L322
        __lowerCamelCase : Tuple = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
        __lowerCamelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1])
        trainer.test(model)
import socket


def __SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
    """simple docstring"""
    SCREAMING_SNAKE_CASE__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    SCREAMING_SNAKE_CASE__ = socket.gethostname()
    SCREAMING_SNAKE_CASE__ = 1_23_12

    sock.connect((host, port) )
    sock.send(B"""Hello server!""" )

    with open("""Received_file""" , """wb""" ) as out_file:
        print("""File opened""" )
        print("""Receiving data...""" )
        while True:
            SCREAMING_SNAKE_CASE__ = sock.recv(10_24 )
            if not data:
                break
            out_file.write(__UpperCamelCase )

    print("""Successfully received the file""" )
    sock.close()
    print("""Connection closed""" )


if __name__ == "__main__":
    main()
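# --- Hedged counterpart sketch (not part of the dumped sample above) --------------------
# The client above connects to port 12312 on the local hostname, sends a greeting, and
# writes whatever bytes arrive into `Received_file`.  A minimal matching server (the file
# and function names here are assumptions) could look like:
import socket as _socket

def serve_file(filename: str = "file_to_send.bin", port: int = 12312) -> None:
    with _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM) as srv:
        srv.bind((_socket.gethostname(), port))
        srv.listen(1)
        conn, _ = srv.accept()
        with conn:
            conn.recv(1024)  # consume the client's greeting
            with open(filename, "rb") as f:
                while chunk := f.read(1024):
                    conn.sendall(chunk)  # the client writes these bytes to Received_file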
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase : List[Any] = logging.get_logger(__name__) class lowerCamelCase__ ( __lowercase): '''simple docstring''' _A = ['input_values', 'attention_mask'] def __init__( self :List[Any] , a :int = 1 , a :int = 1_6_0_0_0 , a :float = 0.0 , a :bool = False , a :int = 8_0 , a :int = 1_6 , a :int = 6_4 , a :str = "hann_window" , a :float = 1.0 , a :float = 8_0 , a :float = 7_6_0_0 , a :float = 1E-1_0 , a :int = 2 , a :bool = True , **a :int , ) -> List[Any]: super().__init__(feature_size=a , sampling_rate=a , padding_value=a , **a ) __UpperCamelCase : List[str] = do_normalize __UpperCamelCase : List[Any] = return_attention_mask __UpperCamelCase : str = num_mel_bins __UpperCamelCase : Optional[Any] = hop_length __UpperCamelCase : Any = win_length __UpperCamelCase : str = win_function __UpperCamelCase : Tuple = frame_signal_scale __UpperCamelCase : Tuple = fmin __UpperCamelCase : Union[str, Any] = fmax __UpperCamelCase : Optional[Any] = mel_floor __UpperCamelCase : Dict = reduction_factor __UpperCamelCase : List[Any] = win_length * sampling_rate // 1_0_0_0 __UpperCamelCase : List[Any] = hop_length * sampling_rate // 1_0_0_0 __UpperCamelCase : str = optimal_fft_length(self.sample_size ) __UpperCamelCase : Optional[int] = (self.n_fft // 2) + 1 __UpperCamelCase : str = window_function(window_length=self.sample_size , name=self.win_function , periodic=a ) __UpperCamelCase : int = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , ) if frame_signal_scale != 1.0: warnings.warn( "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , a , ) if reduction_factor != 2.0: warnings.warn( "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , a , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _lowerCamelCase ( a :List[np.ndarray] , a :List[np.ndarray] , a :float = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: __UpperCamelCase : Dict = np.array(a , np.intaa ) __UpperCamelCase : List[Any] = [] for vector, length in zip(a , attention_mask.sum(-1 ) ): __UpperCamelCase : List[str] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: __UpperCamelCase : List[Any] = padding_value normed_input_values.append(a ) else: __UpperCamelCase : Optional[int] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def _lowerCamelCase ( self :int , a :np.ndarray , ) -> np.ndarray: __UpperCamelCase : Dict = spectrogram( a , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , ) return log_mel_spec.T def __call__( self :Optional[Any] , a :Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , a :Optional[Union[np.ndarray, List[float], List[np.ndarray], 
List[List[float]]]] = None , a :Union[bool, str, PaddingStrategy] = False , a :Optional[int] = None , a :bool = False , a :Optional[int] = None , a :Optional[bool] = None , a :Optional[Union[str, TensorType]] = None , a :Optional[int] = None , **a :List[Any] , ) -> BatchFeature: if audio is None and audio_target is None: raise ValueError("You must provide either `audio` or `audio_target` values." ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( "It is strongly recommended to pass the ``sampling_rate`` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." ) if audio is not None: __UpperCamelCase : Union[str, Any] = self._process_audio( a , a , a , a , a , a , a , a , **a , ) else: __UpperCamelCase : List[Any] = None if audio_target is not None: __UpperCamelCase : Optional[Any] = self._process_audio( a , a , a , a , a , a , a , a , **a , ) if inputs is None: return inputs_target else: __UpperCamelCase : Dict = inputs_target["input_values"] __UpperCamelCase : str = inputs_target.get("attention_mask" ) if decoder_attention_mask is not None: __UpperCamelCase : List[str] = decoder_attention_mask return inputs def _lowerCamelCase ( self :Dict , a :Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a :bool = False , a :Union[bool, str, PaddingStrategy] = False , a :Optional[int] = None , a :bool = False , a :Optional[int] = None , a :Optional[bool] = None , a :Optional[Union[str, TensorType]] = None , **a :Any , ) -> BatchFeature: __UpperCamelCase : Any = isinstance(a , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) __UpperCamelCase : str = is_batched_numpy or ( isinstance(a , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __UpperCamelCase : Dict = [np.asarray(a , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(a , np.ndarray ): __UpperCamelCase : int = np.asarray(a , dtype=np.floataa ) elif isinstance(a , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): __UpperCamelCase : List[str] = speech.astype(np.floataa ) # always return batch if not is_batched: __UpperCamelCase : List[str] = [speech] # needed to make pad() work on spectrogram inputs __UpperCamelCase : Dict = self.feature_size # convert into correct format for padding if is_target: __UpperCamelCase : Tuple = [self._extract_mel_features(a ) for waveform in speech] __UpperCamelCase : Tuple = BatchFeature({"input_values": features} ) __UpperCamelCase : Optional[Any] = self.num_mel_bins else: __UpperCamelCase : Any = BatchFeature({"input_values": speech} ) __UpperCamelCase : List[Any] = self.pad( a , padding=a , max_length=a , truncation=a , pad_to_multiple_of=a , return_attention_mask=a , **a , ) __UpperCamelCase : Any = feature_size_hack # convert input values to correct format __UpperCamelCase : Union[str, Any] = padded_inputs["input_values"] if not isinstance(input_values[0] , np.ndarray ): __UpperCamelCase : List[str] = [np.asarray(a , dtype=np.floataa ) for array in input_values] elif ( not isinstance(a , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and 
input_values[0].dtype is np.dtype(np.floataa ) ): __UpperCamelCase : List[str] = [array.astype(np.floataa ) for array in input_values] elif isinstance(a , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): __UpperCamelCase : Union[str, Any] = input_values.astype(np.floataa ) # convert attention_mask to correct format __UpperCamelCase : str = padded_inputs.get("attention_mask" ) if attention_mask is not None: __UpperCamelCase : List[str] = [np.asarray(a , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: __UpperCamelCase : Dict = ( attention_mask if self._get_padding_strategies(a , max_length=a ) is not PaddingStrategy.DO_NOT_PAD else None ) __UpperCamelCase : Any = self.zero_mean_unit_var_norm( padded_inputs["input_values"] , attention_mask=a , padding_value=self.padding_value ) if return_tensors is not None: __UpperCamelCase : Any = padded_inputs.convert_to_tensors(a ) return padded_inputs def _lowerCamelCase ( self :Any ) -> Dict[str, Any]: __UpperCamelCase : Optional[Any] = super().to_dict() # Don't serialize these as they are derived from the other properties. __UpperCamelCase : Tuple = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"] for name in names: if name in output: del output[name] return output
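A minimal usage sketch of the feature extractor above, assuming it is exposed as transformers' SpeechT5FeatureExtractor; the dummy waveform and printed shapes are illustrative, not taken from this file.

# Usage sketch (assumes the class above is SpeechT5FeatureExtractor; dummy audio is hypothetical).
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()  # defaults: 16 kHz input, 80 mel bins for targets
waveform = np.zeros(16_000, dtype=np.float32)  # one second of silent mono audio
inputs = extractor(audio=waveform, sampling_rate=16_000, return_tensors="np")
print(inputs["input_values"].shape)  # raw-waveform path
targets = extractor(audio_target=waveform, sampling_rate=16_000, return_tensors="np")
print(targets["input_values"].shape)  # log-mel path: (batch, frames, num_mel_bins)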
151
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
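The `_LazyModule` assignment above defers importing the heavy torch-backed submodule until one of its attributes is first touched; a simplified sketch of that idea (not the real `_LazyModule` implementation, just the mechanism it relies on):

# Simplified sketch of the lazy-module pattern (illustrative, not transformers' actual class).
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the defining submodule only on first access, then cache the attribute.
        module_name = self._attr_to_module[attr]
        module = importlib.import_module("." + module_name, self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value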
151
1
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J/(mol*K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Root-mean-square speed of a gas molecule: sqrt(3RT/M)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
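One sanity check worth making explicit: with R expressed in J/(mol*K), the molar mass argument must be in kg/mol, so nitrogen's 28 g/mol enters the formula as 0.028.

# Unit check: molar mass in kg/mol, as implied by R = 8.3144598 J/(mol*K).
v = rms_speed_of_molecule(300, 0.028)  # N2 at 300 K
print(round(v, 1))  # ~517.0 m/s, the textbook value for nitrogen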
277
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class snake_case__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[int], _snake_case : List[Any], _snake_case : str=7, _snake_case : Tuple=3, _snake_case : List[str]=3_0, _snake_case : Tuple=4_0_0, _snake_case : Any=True, _snake_case : List[Any]=None, _snake_case : int=0.9, _snake_case : Optional[Any]=None, _snake_case : str=True, _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], ) ->List[Any]: snake_case__ : int = size if size is not None else {'shortest_edge': 3_0} snake_case__ : Tuple = crop_size if crop_size is not None else {'height': 3_0, 'width': 3_0} snake_case__ : Union[str, Any] = parent snake_case__ : Dict = batch_size snake_case__ : int = num_channels snake_case__ : Tuple = min_resolution snake_case__ : Any = max_resolution snake_case__ : List[Any] = do_resize_and_center_crop snake_case__ : str = size snake_case__ : str = crop_pct snake_case__ : List[str] = crop_size snake_case__ : Optional[int] = do_normalize snake_case__ : Tuple = image_mean snake_case__ : Tuple = image_std def lowercase_ ( self : Optional[int] ) ->int: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = PoolFormerImageProcessor if is_vision_available() else None def lowercase_ ( self : Union[str, Any] ) ->Dict: snake_case__ : Union[str, Any] = PoolFormerImageProcessingTester(self ) @property def lowercase_ ( self : int ) ->Dict: return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : Union[str, Any] ) ->Optional[int]: snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_snake_case, 'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(_snake_case, 'size' ) ) self.assertTrue(hasattr(_snake_case, 'crop_pct' ) ) self.assertTrue(hasattr(_snake_case, 'do_normalize' ) ) self.assertTrue(hasattr(_snake_case, 'image_mean' ) ) self.assertTrue(hasattr(_snake_case, 'image_std' ) ) def lowercase_ ( self : List[str] ) ->List[str]: snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size, {'shortest_edge': 3_0} ) self.assertEqual(image_processor.crop_size, {'height': 3_0, 'width': 3_0} ) snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4 ) self.assertEqual(image_processor.size, {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size, {'height': 8_4, 'width': 8_4} ) def lowercase_ ( self : List[Any] ) ->List[Any]: pass def lowercase_ ( self : List[str] ) ->str: # Initialize image_processing snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case, Image.Image ) # Test not batched input snake_case__ : Optional[int] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case__ : str = image_processing(_snake_case, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def lowercase_ ( self : int ) ->List[Any]: # Initialize image_processing snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, numpify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case, np.ndarray ) # Test not batched input snake_case__ : Dict = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case__ : List[Any] = image_processing(_snake_case, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def lowercase_ ( self : List[str] ) ->List[str]: # Initialize image_processing snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, torchify=_snake_case ) for image in image_inputs: self.assertIsInstance(_snake_case, torch.Tensor ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case__ : Optional[Any] = image_processing(_snake_case, return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), )
277
1
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        # Edges are stored as [first node, second node, edge weight].
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        # Follow parent pointers until the component's representative is found.
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        # Propagate the current representatives to every node in the mapping.
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        # Merge the smaller component into the larger one.
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        # Borůvka's algorithm: repeatedly add each component's cheapest outgoing edge.
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    """Doctest for the Graph class (elided in this copy)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
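A small driver for the class above, using the `Graph` name adopted in this reconstruction; the edge weights are illustrative.

# Example run on a 5-node weighted graph (edges are illustrative).
g = Graph(5)
g.add_edge(0, 1, 4)
g.add_edge(0, 2, 3)
g.add_edge(1, 2, 1)
g.add_edge(1, 3, 2)
g.add_edge(3, 4, 5)
g.boruvka()  # prints each chosen edge, then the total MST weight (11 here)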
323
import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> List[Any]: '''simple docstring''' return x + 2 class __A ( unittest.TestCase ): def lowercase__ ( self : int ): lowerCAmelCase : List[str] = 'x = 3' lowerCAmelCase : Optional[Any] = {} lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) assert result == 3 self.assertDictEqual(UpperCAmelCase_ , {'x': 3} ) lowerCAmelCase : Dict = 'x = y' lowerCAmelCase : List[Any] = {'y': 5} lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(UpperCAmelCase_ , {'x': 5, 'y': 5} ) def lowercase__ ( self : Optional[Any] ): lowerCAmelCase : Any = 'y = add_two(x)' lowerCAmelCase : int = {'x': 3} lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ ) assert result == 5 self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} ) # Won't work without the tool with CaptureStdout() as out: lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) assert result is None assert "tried to execute add_two" in out.out def lowercase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = 'x = 3' lowerCAmelCase : List[Any] = {} lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) assert result == 3 self.assertDictEqual(UpperCAmelCase_ , {'x': 3} ) def lowercase__ ( self : Optional[Any] ): lowerCAmelCase : List[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}' lowerCAmelCase : Dict = {'x': 3} lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ ) self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} ) self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def lowercase__ ( self : Any ): lowerCAmelCase : Union[str, Any] = 'x = 3\ny = 5' lowerCAmelCase : str = {} lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 5} ) def lowercase__ ( self : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = 'text = f\'This is x: {x}.\'' lowerCAmelCase : str = {'x': 3} lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'text': 'This is x: 3.'} ) def lowercase__ ( self : Dict ): lowerCAmelCase : Optional[Any] = 'if x <= 3:\n y = 2\nelse:\n y = 5' lowerCAmelCase : Dict = {'x': 3} lowerCAmelCase : int = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 2} ) lowerCAmelCase : Any = {'x': 8} lowerCAmelCase : Optional[int] = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(UpperCAmelCase_ , {'x': 8, 'y': 5} ) def lowercase__ ( self : List[Any] ): lowerCAmelCase : int = 'test_list = [x, add_two(x)]' lowerCAmelCase : Optional[Any] = {'x': 3} lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , [3, 5] ) self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} ) def lowercase__ ( self : Optional[Any] ): lowerCAmelCase : int = 'y = x' lowerCAmelCase : Optional[int] = {'x': 3} lowerCAmelCase : Tuple = evaluate(UpperCAmelCase_ , {} , state=UpperCAmelCase_ ) assert result == 3 self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'y': 3} ) def lowercase__ ( self : List[str] ): lowerCAmelCase : Dict = 'test_list = [x, add_two(x)]\ntest_list[1]' lowerCAmelCase : List[str] = {'x': 3} lowerCAmelCase : List[str] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ ) assert result == 5 self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_list': [3, 5]} ) lowerCAmelCase : Optional[Any] = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']' lowerCAmelCase : List[Any] = {'x': 3} lowerCAmelCase : Optional[Any] = evaluate(UpperCAmelCase_ , {'add_two': add_two} , state=UpperCAmelCase_ ) assert result == 5 self.assertDictEqual(UpperCAmelCase_ , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} ) def lowercase__ ( self : int ): lowerCAmelCase : Any = 'x = 0\nfor i in range(3):\n x = i' lowerCAmelCase : str = {} lowerCAmelCase : Dict = evaluate(UpperCAmelCase_ , {'range': range} , state=UpperCAmelCase_ ) assert result == 2 self.assertDictEqual(UpperCAmelCase_ , {'x': 2, 'i': 2} )
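Outside the test harness, the restricted interpreter is driven the same way; a minimal sketch reusing the `add_two` tool defined at the top of this file (the returned value is that of the last statement, as the tests assert):

# Standalone sketch of the restricted interpreter's contract.
state = {"x": 3}
result = evaluate("y = add_two(x)\ny * 2", {"add_two": add_two}, state=state)
print(result)  # 10, the value of the last expression
print(state)   # {'x': 3, 'y': 5}, the state dict is updated in place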
323
1
"""simple docstring""" import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __lowercase = """sshleifer/bart-tiny-random""" __lowercase = """patrickvonplaten/t5-tiny-random""" @require_torch class _A ( unittest.TestCase ): """simple docstring""" @cached_property def __snake_case ( self : Any): return AutoConfig.from_pretrained(__UpperCAmelCase) def __snake_case ( self : str): a , *a : Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.num_hidden_layers , 1) def __snake_case ( self : List[str]): a , *a : Union[str, Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase) def __snake_case ( self : Union[str, Any]): a , *a : Optional[int] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=__UpperCAmelCase) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers) def __snake_case ( self : List[str]): a , *a : Union[str, Any] = create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1) self.assertEqual(student.config.encoder_layers , 1) self.assertEqual(student.config.decoder_layers , 1) def __snake_case ( self : List[Any]): with self.assertRaises(__UpperCAmelCase): create_student_by_copying_alternating_layers(__UpperCAmelCase , tempfile.mkdtemp() , e=__UpperCAmelCase , d=__UpperCAmelCase)
40
"""simple docstring""" import unittest from knapsack import knapsack as k class _A ( unittest.TestCase ): """simple docstring""" def __snake_case ( self : List[Any]): a : str = 0 a : Optional[int] = [0] a : Union[str, Any] = [0] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) a : List[str] = [60] a : str = [10] a : Optional[int] = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0) def __snake_case ( self : Optional[int]): a : Any = 3 a : str = [1, 2, 3] a : Tuple = [3, 2, 1] a : Any = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5) def __snake_case ( self : Tuple): a : int = 50 a : List[Any] = [60, 100, 120] a : Optional[int] = [10, 20, 30] a : str = len(__UpperCAmelCase) self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220) if __name__ == "__main__": unittest.main()
40
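For reference, the final knapsack test above is the classic 0/1 instance; a direct call sketch, assuming a `knapsack(capacity, weights, values, counter)` argument order (an assumption here, inferred from the test setup rather than stated in this file):

# Direct call mirroring test_knapsack: items 2 and 3 fit (100 + 120 = 220).
from knapsack import knapsack as k

print(k.knapsack(50, [10, 20, 30], [60, 100, 120], 3))  # 220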
1
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __lowerCamelCase : Optional[int] = """\ """ __lowerCamelCase : Union[str, Any] = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ __lowerCamelCase : Any = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Optional[int] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "input_texts": datasets.Value("string" ), } ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , ) def _lowercase ( self : Dict , __A : Dict , __A : List[str] , __A : int = 1_6 , __A : bool = True , __A : Dict=None ): if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": snake_case__ : Tuple = "cuda" else: snake_case__ : Optional[Any] = "cuda" if torch.cuda.is_available() else "cpu" snake_case__ : List[Any] = AutoModelForCausalLM.from_pretrained(__A ) snake_case__ : List[str] = model.to(__A ) snake_case__ : Optional[int] = AutoTokenizer.from_pretrained(__A ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: snake_case__ : Tuple = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__A ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" snake_case__ : Tuple = model.config.max_length - 1 else: snake_case__ : Any = model.config.max_length snake_case__ : Optional[int] = tokenizer( __A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , return_tensors="pt" , return_attention_mask=__A , ).to(__A ) snake_case__ : Union[str, Any] = encodings["input_ids"] snake_case__ : List[str] = encodings["attention_mask"] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." snake_case__ : Any = [] snake_case__ : Optional[Any] = CrossEntropyLoss(reduction="none" ) for start_index in logging.tqdm(range(0 , len(__A ) , __A ) ): snake_case__ : Tuple = min(start_index + batch_size , len(__A ) ) snake_case__ : List[Any] = encoded_texts[start_index:end_index] snake_case__ : str = attn_masks[start_index:end_index] if add_start_token: snake_case__ : str = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__A ) snake_case__ : int = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) snake_case__ : Dict = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__A ), attn_mask] , dim=1 ) snake_case__ : str = encoded_batch with torch.no_grad(): snake_case__ : Optional[int] = model(__A , attention_mask=__A ).logits snake_case__ : Optional[Any] = out_logits[..., :-1, :].contiguous() snake_case__ : List[Any] = labels[..., 1:].contiguous() snake_case__ : Any = attn_mask[..., 1:].contiguous() snake_case__ : Dict = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __A ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__A )}
286
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
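From the shell the converter is invoked through argparse; a sketch of the command, where the script filename and all paths are placeholders:

# Example invocation (script name and paths are placeholders):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/mobilebert/model.ckpt \
#       --mobilebert_config_file /path/to/mobilebert/config.json \
#       --pytorch_dump_path /path/to/out/pytorch_model.bin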
286
1
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from the top-left to the bottom-right cell,
    moving in the four cardinal directions and avoiding cells marked 1."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
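A quick check of the path counter above on a 3x3 grid with the centre cell blocked:

# 0 = free, 1 = blocked; count simple paths from (0, 0) to (2, 2).
grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
print(depth_first_search(grid, 0, 0, set()))  # 2: around the top, or around the bottom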
334
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase =logging.get_logger() @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) def _lowerCAmelCase ( self : List[Any] ,snake_case : Dict ,snake_case : Tensor ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =len(list(m.modules() ) ) == 1 or isinstance(snake_case ,nn.Convad ) or isinstance(snake_case ,nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self : List[str] ,snake_case : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def _lowerCAmelCase ( self : Optional[Any] ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 ,self.traced ) ) @dataclass class a_ : """simple docstring""" __UpperCAmelCase = 42 __UpperCAmelCase = 42 __UpperCAmelCase = 1 __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = field(default_factory=lowerCamelCase_ ) __UpperCAmelCase = True def __call__( self : str ,snake_case : Tensor ): SCREAMING_SNAKE_CASE =Tracker(self.dest )(snake_case ).parametrized SCREAMING_SNAKE_CASE =Tracker(self.src )(snake_case ).parametrized SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.src_skip ,snake_case ) ) SCREAMING_SNAKE_CASE =list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip ,snake_case ) ) if len(snake_case ) != len(snake_case ) and self.raise_if_mismatch: raise Exception( f'Numbers of operations are different. Source module has {len(snake_case )} operations while' f' destination module has {len(snake_case )}.' 
) for dest_m, src_m in zip(snake_case ,snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) class a_ ( nn.Module ): """simple docstring""" def __init__( self : Any ,snake_case : nn.Module ): super().__init__() SCREAMING_SNAKE_CASE =[] # - get the stem feature_blocks.append(('conv1', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('block' ), f'Unexpected layer name {k}' SCREAMING_SNAKE_CASE =len(snake_case ) + 1 feature_blocks.append((f'res{block_index}', v) ) SCREAMING_SNAKE_CASE =nn.ModuleDict(snake_case ) def _lowerCAmelCase ( self : Dict ,snake_case : Tensor ): return get_trunk_forward_outputs( snake_case ,out_feat_keys=snake_case ,feature_blocks=self._feature_blocks ,) class a_ ( lowerCamelCase_ ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int] ,snake_case : str ): SCREAMING_SNAKE_CASE =x.split('-' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : Optional[Any] ,snake_case : str ): # default to timm! if x not in self: SCREAMING_SNAKE_CASE =self.convert_name_to_timm(snake_case ) SCREAMING_SNAKE_CASE =partial(lambda: (timm.create_model(snake_case ,pretrained=snake_case ).eval(), None) ) else: SCREAMING_SNAKE_CASE =super().__getitem__(snake_case ) return val class a_ ( lowerCamelCase_ ): """simple docstring""" def __getitem__( self : int ,snake_case : str ): if "seer" in x and "in1k" not in x: SCREAMING_SNAKE_CASE =RegNetModel else: SCREAMING_SNAKE_CASE =RegNetForImageClassification return val def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ ): """simple docstring""" for from_key, to_key in keys: SCREAMING_SNAKE_CASE =from_state_dict[from_key].clone() print(F'Copied key={from_key} to={to_key}' ) return to_state_dict def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ = True, ): """simple docstring""" print(F'Converting {name}...' ) with torch.no_grad(): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE =from_model_func() SCREAMING_SNAKE_CASE =our_model_func(lowerCAmelCase_ ).eval() SCREAMING_SNAKE_CASE =ModuleTransfer(src=lowerCAmelCase_, dest=lowerCAmelCase_, raise_if_mismatch=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =torch.randn((1, 3, 224, 224) ) module_transfer(lowerCAmelCase_ ) if from_state_dict is not None: SCREAMING_SNAKE_CASE =[] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE =[('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] SCREAMING_SNAKE_CASE =manually_copy_vissl_head(lowerCAmelCase_, our_model.state_dict(), lowerCAmelCase_ ) our_model.load_state_dict(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =our_model(lowerCAmelCase_, output_hidden_states=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =( our_outputs.logits if isinstance(lowerCAmelCase_, lowerCAmelCase_ ) else our_outputs.last_hidden_state ) SCREAMING_SNAKE_CASE =from_model(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE =from_output[-1] if type(lowerCAmelCase_ ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: SCREAMING_SNAKE_CASE =our_outputs.hidden_states[-1] assert torch.allclose(lowerCAmelCase_, lowerCAmelCase_ ), "The model logits don't match the original one." 
if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add model', use_temp_dir=lowerCAmelCase_, ) SCREAMING_SNAKE_CASE =224 if 'seer' not in name else 384 # we can use the convnext one SCREAMING_SNAKE_CASE =AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k', size=lowerCAmelCase_ ) image_processor.push_to_hub( repo_path_or_name=save_directory / name, commit_message='Add image processor', use_temp_dir=lowerCAmelCase_, ) print(F'Pushed {name}' ) def snake_case__ ( lowerCAmelCase_, lowerCAmelCase_ = None, lowerCAmelCase_ = True ): """simple docstring""" SCREAMING_SNAKE_CASE ='imagenet-1k-id2label.json' SCREAMING_SNAKE_CASE =1000 SCREAMING_SNAKE_CASE =(1, num_labels) SCREAMING_SNAKE_CASE ='huggingface/label-files' SCREAMING_SNAKE_CASE =num_labels SCREAMING_SNAKE_CASE =json.load(open(cached_download(hf_hub_url(lowerCAmelCase_, lowerCAmelCase_, repo_type='dataset' ) ), 'r' ) ) SCREAMING_SNAKE_CASE ={int(lowerCAmelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =idalabel SCREAMING_SNAKE_CASE ={v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE =partial(lowerCAmelCase_, num_labels=lowerCAmelCase_, idalabel=lowerCAmelCase_, labelaid=lowerCAmelCase_ ) SCREAMING_SNAKE_CASE ={ 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8, layer_type='x' ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 160, 384], groups_width=16, layer_type='x' ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 240, 528], groups_width=24, layer_type='x' ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5], hidden_sizes=[64, 128, 288, 672], groups_width=16, layer_type='x' ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2], hidden_sizes=[72, 168, 408, 912], groups_width=24, layer_type='x' ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2], hidden_sizes=[96, 192, 432, 1008], groups_width=48, layer_type='x' ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2], hidden_sizes=[80, 240, 560, 1360], groups_width=40, layer_type='x' ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 392, 784, 1624], groups_width=56, layer_type='x' ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1], hidden_sizes=[80, 240, 720, 1920], groups_width=120, layer_type='x' ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112, layer_type='x' ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1], hidden_sizes=[256, 512, 896, 2048], groups_width=128, layer_type='x' ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1], hidden_sizes=[336, 672, 1344, 2520], groups_width=168, layer_type='x' ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6], hidden_sizes=[48, 104, 208, 440], groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4], hidden_sizes=[48, 112, 256, 608], groups_width=16 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2], hidden_sizes=[64, 128, 320, 768], groups_width=16 ), 'regnet-y-016': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2], hidden_sizes=[48, 120, 336, 888], groups_width=24 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1], hidden_sizes=[72, 216, 576, 
1512], groups_width=24 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2], hidden_sizes=[128, 192, 512, 1088], groups_width=64 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2], hidden_sizes=[144, 288, 576, 1296], groups_width=72 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[168, 448, 896, 2016], groups_width=56 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[224, 448, 896, 2240], groups_width=112 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1], hidden_sizes=[224, 448, 1232, 3024], groups_width=112 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), # finetuned on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[232, 696, 1392, 3712], groups_width=232 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[328, 984, 1968, 4920], groups_width=328 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[528, 1056, 2904, 7392], groups_width=264 ), 'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1], hidden_sizes=[640, 1696, 2544, 5088], groups_width=640 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[2020, 4040, 11110, 28280], groups_width=1010 ), } SCREAMING_SNAKE_CASE =NameToOurModelFuncMap() SCREAMING_SNAKE_CASE =NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(lowerCAmelCase_, lowerCAmelCase_ ) -> Tuple[nn.Module, Dict]: SCREAMING_SNAKE_CASE =torch.hub.load_state_dict_from_url(lowerCAmelCase_, model_dir=str(lowerCAmelCase_ ), map_location='cpu' ) SCREAMING_SNAKE_CASE =model_func() # check if we have a head, if yes add it SCREAMING_SNAKE_CASE =files['classy_state_dict']['base_model']['model'] SCREAMING_SNAKE_CASE =model_state_dict['trunk'] model.load_state_dict(lowerCAmelCase_ ) return model.eval(), model_state_dict["heads"] # pretrained SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch', 
lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_a=620.83, w_m=2.52 ) ) ), ) # IN1K finetuned SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch', lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) SCREAMING_SNAKE_CASE =partial( lowerCAmelCase_, 'https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch', lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=1010, w_a=1744, w_a=620.83, w_m=2.52 ) ) ), ) if model_name: convert_weight_and_push( lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], lowerCAmelCase_, lowerCAmelCase_, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( lowerCAmelCase_, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_, ) return config, expected_shape if __name__ == "__main__": _lowerCamelCase =argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default=None, type=str, help=( "The name of the model you wish to convert, it must be one of the supported regnet* architecture," " currently: regnetx-*, regnety-*. If `None`, all of them will the converted." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=Path, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", default=True, type=bool, required=False, help="If True, push model and image processor to the hub.", ) _lowerCamelCase =parser.parse_args() _lowerCamelCase =args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
334
1
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
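In practice this builder is reached through the public loader rather than instantiated directly; a minimal sketch, with an illustrative file path:

# Loading parquet files dispatches to the builder above.
from datasets import load_dataset

ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})  # path is illustrative
print(ds["train"].features)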
368
class Node:
    # Binary search tree node used by tree sort.
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive traversal
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
4
0
'''simple docstring''' import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __magic_name__( lowerCamelCase, lowerCamelCase=1_0): __lowerCAmelCase = [] for _ in range(lowerCamelCase): lrs.append(scheduler.get_lr()[0]) scheduler.step() return lrs def __magic_name__( lowerCamelCase, lowerCamelCase=1_0): __lowerCAmelCase = [] for step in range(lowerCamelCase): lrs.append(scheduler.get_lr()[0]) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: __lowerCAmelCase = os.path.join(lowerCamelCase, '''schedule.bin''') torch.save(scheduler.state_dict(), lowerCamelCase) __lowerCAmelCase = torch.load(lowerCamelCase) scheduler.load_state_dict(lowerCamelCase) return lrs @require_torch class a__ ( unittest.TestCase ): """simple docstring""" def _snake_case (self , __lowercase , __lowercase , __lowercase ): self.assertEqual(len(__lowercase ) , len(__lowercase ) ) for a, b in zip(__lowercase , __lowercase ): self.assertAlmostEqual(__lowercase , __lowercase , delta=__lowercase ) def _snake_case (self ): __lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowercase ) __lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) __lowerCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __lowerCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(1_00 ): __lowerCAmelCase = criterion(__lowercase , __lowercase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def _snake_case (self ): __lowerCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=__lowercase ) __lowerCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) __lowerCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping __lowerCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=__lowercase , weight_decay=0.0 , relative_step=__lowercase , scale_parameter=__lowercase , warmup_init=__lowercase , ) for _ in range(10_00 ): __lowerCAmelCase = criterion(__lowercase , __lowercase ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class a__ ( unittest.TestCase ): """simple docstring""" __UpperCamelCase : Tuple = nn.Linear(50 , 50 ) if is_torch_available() else None __UpperCamelCase : Union[str, Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __UpperCamelCase : int = 10 def _snake_case (self , __lowercase , __lowercase , __lowercase , __lowercase=None ): self.assertEqual(len(__lowercase ) , len(__lowercase ) ) for a, b in zip(__lowercase , __lowercase ): self.assertAlmostEqual(__lowercase , __lowercase , delta=__lowercase , msg=__lowercase ) def _snake_case (self ): __lowerCAmelCase = {'''num_warmup_steps''': 2, '''num_training_steps''': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) __lowerCAmelCase = { get_constant_schedule: ({}, [1_0.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7}, [0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4], ), } for scheduler_func, data in scheds.items(): __lowerCAmelCase , __lowerCAmelCase = data __lowerCAmelCase = scheduler_func(self.optimizer , **__lowercase ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) __lowerCAmelCase = unwrap_schedule(__lowercase , self.num_steps ) self.assertListAlmostEqual( __lowercase , __lowercase , tol=1e-2 , msg=F"""failed for {scheduler_func} in normal scheduler""" , ) __lowerCAmelCase = scheduler_func(self.optimizer , **__lowercase ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(__lowercase ) # wrap to test picklability of the schedule __lowerCAmelCase = unwrap_and_save_reload_schedule(__lowercase , self.num_steps ) self.assertListEqual(__lowercase , __lowercase , msg=F"""failed for {scheduler_func} in save and reload""" ) class a__ : """simple docstring""" def __init__(self , __lowercase ): __lowerCAmelCase = fn def __call__(self , *__lowercase , **__lowercase ): return self.fn(*__lowercase , **__lowercase ) @classmethod def _snake_case (self , __lowercase ): __lowerCAmelCase = list(map(self , scheduler.lr_lambdas ) )
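Outside the test harness the schedules are created the same way; a small sketch mirroring the `num_warmup_steps=2, num_training_steps=10` kwargs used above:

# Minimal scheduler sketch matching the kwargs exercised in the tests above.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
for _ in range(10):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # linearly decayed to 0 after the 2-step warmup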
174
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCAmelCase : List[str] = logging.get_logger(__name__) _UpperCAmelCase : Tuple = """▁""" _UpperCAmelCase : str = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""} _UpperCAmelCase : Dict = { """vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""", }, """monolingual_vocab_file""": { """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""", }, } _UpperCAmelCase : List[Any] = {"""vinai/bartpho-syllable""": 1_0_2_4} class a__ ( __A ): """simple docstring""" __UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES __UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : List[Any] = ['input_ids', 'attention_mask'] def __init__(self , __lowercase , __lowercase , __lowercase="<s>" , __lowercase="</s>" , __lowercase="</s>" , __lowercase="<s>" , __lowercase="<unk>" , __lowercase="<pad>" , __lowercase="<mask>" , __lowercase = None , **__lowercase , ): # Mask token behave like a normal word, i.e. include the space before it __lowerCAmelCase = AddedToken(__lowercase , lstrip=__lowercase , rstrip=__lowercase ) if isinstance(__lowercase , __lowercase ) else mask_token __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__lowercase , eos_token=__lowercase , unk_token=__lowercase , sep_token=__lowercase , cls_token=__lowercase , pad_token=__lowercase , mask_token=__lowercase , sp_model_kwargs=self.sp_model_kwargs , **__lowercase , ) __lowerCAmelCase = vocab_file __lowerCAmelCase = monolingual_vocab_file __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__lowercase ) ) # Load the reduced vocab # Keep order of special tokens for backward compatibility __lowerCAmelCase = {} __lowerCAmelCase = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(__lowercase ) not in self.fairseq_tokens_to_ids: __lowerCAmelCase = cnt cnt += 1 with open(__lowercase , '''r''' , encoding='''utf-8''' ) as f: for line in f.readlines(): __lowerCAmelCase = line.strip().split()[0] __lowerCAmelCase = len(self.fairseq_tokens_to_ids ) if str(__lowercase ) not in self.fairseq_tokens_to_ids: __lowerCAmelCase = len(self.fairseq_tokens_to_ids ) __lowerCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__(self ): __lowerCAmelCase = self.__dict__.copy() __lowerCAmelCase = None __lowerCAmelCase = self.sp_model.serialized_model_proto() return state def __setstate__(self , __lowercase ): __lowerCAmelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCAmelCase = {} __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _snake_case (self , __lowercase , __lowercase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] __lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _snake_case (self , __lowercase , __lowercase = None , __lowercase = False ): if 
already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) if token_ids_a is None: return [1] + ([0] * len(__lowercase )) + [1] return [1] + ([0] * len(__lowercase )) + [1, 1] + ([0] * len(__lowercase )) + [1] def _snake_case (self , __lowercase , __lowercase = None ): __lowerCAmelCase = [self.sep_token_id] __lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _snake_case (self ): return len(self.fairseq_ids_to_tokens ) def _snake_case (self ): __lowerCAmelCase = {self.convert_ids_to_tokens(__lowercase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _snake_case (self , __lowercase ): return self.sp_model.encode(__lowercase , out_type=__lowercase ) def _snake_case (self , __lowercase ): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _snake_case (self , __lowercase ): return self.fairseq_ids_to_tokens[index] def _snake_case (self , __lowercase ): __lowerCAmelCase = ''''''.join(__lowercase ).replace(__lowercase , ''' ''' ).strip() return out_string def _snake_case (self , __lowercase , __lowercase = None ): if not os.path.isdir(__lowercase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCAmelCase = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowerCAmelCase = os.path.join( __lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowercase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __lowercase ) elif not os.path.isfile(self.vocab_file ): with open(__lowercase , '''wb''' ) as fi: __lowerCAmelCase = self.sp_model.serialized_model_proto() fi.write(__lowercase ) if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath( __lowercase ) and os.path.isfile(self.monolingual_vocab_file ): copyfile(self.monolingual_vocab_file , __lowercase ) elif not os.path.isfile(self.monolingual_vocab_file ): with open(__lowercase , '''w''' , encoding='''utf-8''' ) as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(F"""{str(__lowercase )} \n""" ) return out_vocab_file, out_monolingual_vocab_file
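# A minimal usage sketch for the tokenizer file above. The class name in the
# record is obfuscated, so the sketch goes through AutoTokenizer; the public
# "vinai/bartpho-syllable" checkpoint and a sentencepiece-enabled transformers
# install are assumptions, not something the record itself guarantees.
from transformers import AutoTokenizer

bartpho = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
line = "Chúng tôi là những nghiên cứu viên."  # example Vietnamese sentence
ids = bartpho(line).input_ids  # ids remapped through the reduced fairseq vocab
print(bartpho.convert_ids_to_tokens(ids))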
174
1
# This code is adapted from OpenAI's release # https://github.com/openai/human-eval/blob/master/human_eval/execution.py import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : List[Any] ,lowerCamelCase_ : Dict ,lowerCamelCase_ : Optional[Any]): '''simple docstring''' lowerCAmelCase__ : List[Any] = multiprocessing.Manager() lowerCAmelCase__ : Dict = manager.list() lowerCAmelCase__ : Optional[Any] = multiprocessing.Process(target=lowerCamelCase_ ,args=(check_program, result, timeout)) p.start() p.join(timeout=timeout + 1) if p.is_alive(): p.kill() if not result: result.append('''timed out''') return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def lowerCAmelCase__ ( lowerCamelCase_ : Tuple ,lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Any): '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil lowerCAmelCase__ : List[Any] = shutil.rmtree lowerCAmelCase__ : Dict = os.rmdir lowerCAmelCase__ : Optional[Any] = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: lowerCAmelCase__ : Any = {} with swallow_io(): with time_limit(lowerCamelCase_): exec(lowerCamelCase_ ,lowerCamelCase_) result.append('''passed''') except TimeoutException: result.append('''timed out''') except BaseException as e: result.append(f"""failed: {e}""") # Needed for cleaning up. lowerCAmelCase__ : List[Any] = rmtree lowerCAmelCase__ : List[Any] = rmdir lowerCAmelCase__ : Optional[int] = chdir @contextlib.contextmanager def lowerCAmelCase__ ( lowerCamelCase_ : List[str]): '''simple docstring''' def signal_handler(lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Dict): raise TimeoutException('''Timed out!''') signal.setitimer(signal.ITIMER_REAL ,lowerCamelCase_) signal.signal(signal.SIGALRM ,lowerCamelCase_) try: yield finally: signal.setitimer(signal.ITIMER_REAL ,0) @contextlib.contextmanager def lowerCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase__ : int = WriteOnlyStringIO() with contextlib.redirect_stdout(lowerCamelCase_): with contextlib.redirect_stderr(lowerCamelCase_): with redirect_stdin(lowerCamelCase_): yield @contextlib.contextmanager def lowerCAmelCase__ ( ): '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(lowerCamelCase_): yield dirname class lowerCamelCase__ ( _a): '''simple docstring''' pass class lowerCamelCase__ ( io.StringIO): '''simple docstring''' def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Tuple: """simple docstring""" raise OSError def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> str: """simple docstring""" raise OSError def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> str: """simple docstring""" raise OSError def lowerCAmelCase__ (self ,*__lowerCamelCase ,**__lowerCamelCase ) -> Optional[int]: """simple docstring""" return False class lowerCamelCase__ ( contextlib._RedirectStream): # type: ignore '''simple docstring''' snake_case_ ="""stdin""" @contextlib.contextmanager def lowerCAmelCase__ ( lowerCamelCase_ : Union[str, Any]): '''simple docstring''' if root == ".": yield return lowerCAmelCase__ : List[str] = os.getcwd() os.chdir(lowerCamelCase_) try: yield except BaseException as exc: raise exc finally: os.chdir(lowerCamelCase_) def 
lowerCAmelCase__ ( lowerCamelCase_ : Optional[int]=None): '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS ,(maximum_memory_bytes, maximum_memory_bytes)) resource.setrlimit(resource.RLIMIT_DATA ,(maximum_memory_bytes, maximum_memory_bytes)) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK ,(maximum_memory_bytes, maximum_memory_bytes)) faulthandler.disable() import builtins lowerCAmelCase__ : int = None lowerCAmelCase__ : Optional[Any] = None import os lowerCAmelCase__ : Optional[int] = '1' lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : Union[str, Any] = None lowerCAmelCase__ : Optional[int] = None lowerCAmelCase__ : str = None lowerCAmelCase__ : List[str] = None lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : Any = None lowerCAmelCase__ : Optional[int] = None lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : str = None lowerCAmelCase__ : str = None lowerCAmelCase__ : str = None lowerCAmelCase__ : Optional[int] = None lowerCAmelCase__ : int = None lowerCAmelCase__ : Any = None lowerCAmelCase__ : str = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : int = None lowerCAmelCase__ : Optional[int] = None lowerCAmelCase__ : str = None lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : Any = None import shutil lowerCAmelCase__ : int = None lowerCAmelCase__ : Union[str, Any] = None lowerCAmelCase__ : Any = None import subprocess lowerCAmelCase__ : Any = None # type: ignore lowerCAmelCase__ : str = None import sys lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : Optional[Any] = None
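# Hedged driver sketch for the sandboxed checker above. `check_correctness` is
# the upstream human-eval name for the first function in this record (the one
# that spawns the worker process); the dump reuses one obfuscated identifier
# for several functions, so both the name and the argument order are assumptions.
check_program = "def add(a, b):\n    return a + b\nassert add(2, 3) == 5\n"
result = check_correctness(check_program, timeout=3.0, task_id="demo/0", completion_id=0)
print(result)  # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}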
368
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
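# What the lazy structure above buys in practice: importing the package is
# cheap, and torch/tf/flax are only imported once a backend class is touched.
from transformers.models.blenderbot_small import BlenderbotSmallConfig

config = BlenderbotSmallConfig()  # no deep-learning backend has been imported yet
print(config.model_type)  # "blenderbot-small"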
94
0
import torch from diffusers import UnCLIPScheduler from .test_schedulers import SchedulerCommonTest class snake_case ( _lowerCamelCase ): '''simple docstring''' snake_case_ : Optional[int] = (UnCLIPScheduler,) def UpperCamelCase_ ( self : List[Any] , **lowerCAmelCase : Dict) -> Any: """simple docstring""" _snake_case : Dict = { 'num_train_timesteps': 1000, 'variance_type': 'fixed_small_log', 'clip_sample': True, 'clip_sample_range': 1.0, 'prediction_type': 'epsilon', } config.update(**_SCREAMING_SNAKE_CASE) return config def UpperCamelCase_ ( self : str) -> Tuple: """simple docstring""" for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self : Optional[int]) -> Optional[int]: """simple docstring""" for variance in ["fixed_small_log", "learned_range"]: self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self : Union[str, Any]) -> int: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self : Union[str, Any]) -> List[Any]: """simple docstring""" for clip_sample_range in [1, 5, 10, 20]: self.check_over_configs(clip_sample_range=_SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self : Dict) -> Tuple: """simple docstring""" for prediction_type in ["epsilon", "sample"]: self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self : Optional[int]) -> List[str]: """simple docstring""" for time_step in [0, 500, 999]: for prev_timestep in [None, 5, 100, 250, 500, 750]: if prev_timestep is not None and prev_timestep >= time_step: continue self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self : Optional[Any]) -> List[str]: """simple docstring""" _snake_case : Optional[Any] = self.scheduler_classes[0] _snake_case : Union[str, Any] = self.get_scheduler_config(variance_type="""fixed_small_log""") _snake_case : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE) assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000E-10)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_549_625)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9_994_987)) < 1E-5 def UpperCamelCase_ ( self : Tuple) -> Union[str, Any]: """simple docstring""" _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Dict = self.get_scheduler_config(variance_type="""learned_range""") _snake_case : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE) _snake_case : str = 0.5 assert scheduler._get_variance(1 , predicted_variance=_SCREAMING_SNAKE_CASE) - -10.1_712_790 < 1E-5 assert scheduler._get_variance(487 , predicted_variance=_SCREAMING_SNAKE_CASE) - -5.7_998_052 < 1E-5 assert scheduler._get_variance(999 , predicted_variance=_SCREAMING_SNAKE_CASE) - -0.0_010_011 < 1E-5 def UpperCamelCase_ ( self : Union[str, Any]) -> Optional[Any]: """simple docstring""" _snake_case : Optional[int] = self.scheduler_classes[0] _snake_case : Optional[int] = self.get_scheduler_config() _snake_case : Union[str, Any] = scheduler_class(**_SCREAMING_SNAKE_CASE) _snake_case : Tuple = scheduler.timesteps _snake_case : Optional[int] = self.dummy_model() _snake_case : int = self.dummy_sample_deter _snake_case : str = torch.manual_seed(0) for i, t in enumerate(_SCREAMING_SNAKE_CASE): # 1. predict noise residual _snake_case : Dict = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) # 2. 
predict previous mean of sample x_t-1 _snake_case : int = scheduler.step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE).prev_sample _snake_case : str = pred_prev_sample _snake_case : List[str] = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE)) _snake_case : List[Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 252.2_682_495) < 1E-2 assert abs(result_mean.item() - 0.3_284_743) < 1E-3 def UpperCamelCase_ ( self : List[Any]) -> Dict: """simple docstring""" _snake_case : Tuple = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config() _snake_case : Tuple = scheduler_class(**_SCREAMING_SNAKE_CASE) scheduler.set_timesteps(25) _snake_case : int = scheduler.timesteps _snake_case : List[Any] = self.dummy_model() _snake_case : List[Any] = self.dummy_sample_deter _snake_case : Any = torch.manual_seed(0) for i, t in enumerate(_SCREAMING_SNAKE_CASE): # 1. predict noise residual _snake_case : Optional[int] = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE) if i + 1 == timesteps.shape[0]: _snake_case : Any = None else: _snake_case : Optional[Any] = timesteps[i + 1] # 2. predict previous mean of sample x_t-1 _snake_case : Union[str, Any] = scheduler.step( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE).prev_sample _snake_case : List[Any] = pred_prev_sample _snake_case : str = torch.sum(torch.abs(_SCREAMING_SNAKE_CASE)) _snake_case : Union[str, Any] = torch.mean(torch.abs(_SCREAMING_SNAKE_CASE)) assert abs(result_sum.item() - 258.2_044_983) < 1E-2 assert abs(result_mean.item() - 0.3_362_038) < 1E-3 def UpperCamelCase_ ( self : str) -> Optional[Any]: """simple docstring""" pass def UpperCamelCase_ ( self : Optional[Any]) -> Optional[int]: """simple docstring""" pass
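# Standalone denoising-loop sketch mirroring the full-loop test above; random
# tensors stand in for dummy_model / dummy_sample_deter, so the printed value
# is illustrative rather than one of the asserted constants.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)
sample = torch.randn(1, 3, 32, 32)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    residual = torch.randn_like(sample)  # stand-in for a real noise prediction
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
print(sample.abs().mean())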
317
"""simple docstring""" import math import sys def __lowerCAmelCase (_UpperCamelCase ): if number != int(_UpperCamelCase ): raise ValueError('the value of input must be a natural number' ) if number < 0: raise ValueError('the value of input must not be a negative number' ) if number == 0: return 1 __lowerCAmelCase : Any = [-1] * (number + 1) __lowerCAmelCase : List[Any] = 0 for i in range(1 , number + 1 ): __lowerCAmelCase : List[Any] = sys.maxsize __lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) ) for j in range(1 , root + 1 ): __lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)] __lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : List[str] = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
86
0
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""Wraps several `ControlNetModel`s and sums their residuals before they reach the UNet."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with
        # `DiffusionPipeline.from_pretrained`; second, third, ... controlnets have to be saved under
        # `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
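# Hedged composition sketch for the wrapper above; the two checkpoint names
# are assumptions, and any public ControlNet checkpoints would do.
from diffusers import ControlNetModel

canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
depth = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth")
multi = MultiControlNetModel([canny, depth])
multi.save_pretrained("./multi-controlnet")  # writes ./multi-controlnet and ./multi-controlnet_1
restored = MultiControlNetModel.from_pretrained("./multi-controlnet")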
67
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _lowerCamelCase = logging.get_logger(__name__) class _snake_case (__SCREAMING_SNAKE_CASE): def __init__( self ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ): UpperCAmelCase_ : List[Any] = feature_size UpperCAmelCase_ : Any = sampling_rate UpperCAmelCase_ : Any = padding_value UpperCAmelCase_ : Any = kwargs.pop("padding_side" ,"right" ) UpperCAmelCase_ : int = kwargs.pop("return_attention_mask" ,_snake_case ) super().__init__(**_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = True ,_snake_case = None ,_snake_case = False ,_snake_case = None ,_snake_case = None ,_snake_case = None ,): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(_snake_case ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ): UpperCAmelCase_ : Dict = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" f''' to this method that includes {self.model_input_names[0]}, but you provided''' f''' {list(processed_features.keys() )}''' ) UpperCAmelCase_ : Optional[int] = processed_features[self.model_input_names[0]] UpperCAmelCase_ : Dict = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(_snake_case ) == 0: if return_attention_mask: UpperCAmelCase_ : List[str] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch UpperCAmelCase_ : Tuple = required_input[0] if isinstance(_snake_case ,(list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. UpperCAmelCase_ : int = 0 while len(required_input[index] ) == 0: index += 1 if index < len(_snake_case ): UpperCAmelCase_ : str = required_input[index][0] if return_tensors is None: if is_tf_tensor(_snake_case ): UpperCAmelCase_ : Any = "tf" elif is_torch_tensor(_snake_case ): UpperCAmelCase_ : Optional[int] = "pt" elif isinstance(_snake_case ,(int, float, list, tuple, np.ndarray) ): UpperCAmelCase_ : Any = "np" else: raise ValueError( f'''type of {first_element} unknown: {type(_snake_case )}. ''' "Should be one of a python, numpy, pytorch or tensorflow object." 
) for key, value in processed_features.items(): if isinstance(value[0] ,(int, float) ): UpperCAmelCase_ : Optional[Any] = to_numpy(_snake_case ) else: UpperCAmelCase_ : Any = [to_numpy(_snake_case ) for v in value] # Convert padding_strategy in PaddingStrategy UpperCAmelCase_ : List[Any] = self._get_padding_strategies(padding=_snake_case ,max_length=_snake_case ) UpperCAmelCase_ : Dict = processed_features[self.model_input_names[0]] UpperCAmelCase_ : str = len(_snake_case ) if not all(len(_snake_case ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." ) UpperCAmelCase_ : Dict = [] for i in range(_snake_case ): UpperCAmelCase_ : List[Any] = {k: v[i] for k, v in processed_features.items()} # truncation UpperCAmelCase_ : Dict = self._truncate( _snake_case ,max_length=_snake_case ,pad_to_multiple_of=_snake_case ,truncation=_snake_case ,) truncated_inputs.append(_snake_case ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length UpperCAmelCase_ : List[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) UpperCAmelCase_ : str = PaddingStrategy.MAX_LENGTH UpperCAmelCase_ : Dict = {} for i in range(_snake_case ): # padding UpperCAmelCase_ : Dict = self._pad( truncated_inputs[i] ,max_length=_snake_case ,padding_strategy=_snake_case ,pad_to_multiple_of=_snake_case ,return_attention_mask=_snake_case ,) for key, value in outputs.items(): if key not in batch_outputs: UpperCAmelCase_ : Optional[Any] = [] if value.dtype is np.dtype(np.floataa ): UpperCAmelCase_ : str = value.astype(np.floataa ) batch_outputs[key].append(_snake_case ) return BatchFeature(_snake_case ,tensor_type=_snake_case ) def UpperCamelCase__ ( self ,_snake_case ,_snake_case = None ,_snake_case = PaddingStrategy.DO_NOT_PAD ,_snake_case = None ,_snake_case = None ,): UpperCAmelCase_ : Any = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: UpperCAmelCase_ : Any = len(_snake_case ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_snake_case ) < max_length if return_attention_mask and "attention_mask" not in processed_features: UpperCAmelCase_ : List[str] = np.ones(len(_snake_case ) ,dtype=np.intaa ) if needs_to_be_padded: UpperCAmelCase_ : Union[str, Any] = max_length - len(_snake_case ) if self.padding_side == "right": if return_attention_mask: UpperCAmelCase_ : str = np.pad( processed_features["attention_mask"] ,(0, difference) ) UpperCAmelCase_ : str = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) UpperCAmelCase_ : int = np.pad( _snake_case ,_snake_case ,"constant" ,constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: UpperCAmelCase_ : List[Any] = np.pad( processed_features["attention_mask"] ,(difference, 0) ) UpperCAmelCase_ : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) UpperCAmelCase_ : Union[str, Any] = np.pad( _snake_case ,_snake_case ,"constant" ,constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def UpperCamelCase__ ( self ,_snake_case ,_snake_case 
= None ,_snake_case = None ,_snake_case = None ,): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) UpperCAmelCase_ : List[Any] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): UpperCAmelCase_ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of UpperCAmelCase_ : Dict = len(_snake_case ) > max_length if needs_to_be_truncated: UpperCAmelCase_ : Any = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: UpperCAmelCase_ : str = processed_features["attention_mask"][:max_length] return processed_features def UpperCamelCase__ ( self ,_snake_case=False ,_snake_case=None ): # Get padding strategy if padding is not False: if padding is True: UpperCAmelCase_ : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(_snake_case ,_snake_case ): UpperCAmelCase_ : str = PaddingStrategy(_snake_case ) elif isinstance(_snake_case ,_snake_case ): UpperCAmelCase_ : List[Any] = padding else: UpperCAmelCase_ : List[str] = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
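# Illustrative sketch of the padding path above through one concrete subclass
# of this mixin (Wav2Vec2FeatureExtractor); the constructor values are just
# reasonable defaults for 16 kHz mono audio.
import numpy as np
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
raw = [np.zeros(1_600), np.zeros(800)]  # two utterances of different length
batch = extractor(raw, sampling_rate=16_000, padding=True, return_tensors="np")
print(batch["input_values"].shape)  # (2, 1600) after right-padding the shorter one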
67
1
import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Any=32 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Union[str, Any]=10 , UpperCamelCase__ : List[str]=[10, 20, 30, 40] , UpperCamelCase__ : Optional[int]=[1, 1, 2, 1] , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str="relu" , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : Tuple=None , ) -> List[str]: """simple docstring""" __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = image_size __magic_name__ = num_channels __magic_name__ = embeddings_size __magic_name__ = hidden_sizes __magic_name__ = depths __magic_name__ = is_training __magic_name__ = use_labels __magic_name__ = hidden_act __magic_name__ = num_labels __magic_name__ = scope __magic_name__ = len(UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> Tuple: """simple docstring""" __magic_name__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __magic_name__ = self.get_config() return config, pixel_values def _lowercase ( self : List[Any] ) -> Optional[Any]: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ) -> List[str]: """simple docstring""" __magic_name__ = FlaxRegNetModel(config=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.num_labels __magic_name__ = FlaxRegNetForImageClassification(config=UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Tuple ) -> str: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() __magic_name__ , __magic_name__ = config_and_inputs __magic_name__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () a__ = False a__ = False a__ = False def _lowercase ( self : int ) -> None: """simple docstring""" __magic_name__ = FlaxRegNetModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ ) def 
_lowercase ( self : Union[str, Any] ) -> int: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" return def _lowercase ( self : List[str] ) -> Any: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowercase ( self : Any ) -> Optional[int]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @unittest.skip(reason="""RegNet does not use inputs_embeds""" ) def _lowercase ( self : Union[str, Any] ) -> int: """simple docstring""" pass @unittest.skip(reason="""RegNet does not support input and output embeddings""" ) def _lowercase ( self : Dict ) -> Dict: """simple docstring""" pass def _lowercase ( self : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) __magic_name__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __magic_name__ = [*signature.parameters.keys()] __magic_name__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _lowercase ( self : List[Any] ) -> Any: """simple docstring""" def check_hidden_states_output(UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] ): __magic_name__ = model_class(UpperCamelCase__ ) __magic_name__ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) ) __magic_name__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __magic_name__ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 ) __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __magic_name__ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __magic_name__ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> int: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __magic_name__ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = model_class(UpperCamelCase__ ) @jax.jit def model_jitted(UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ): return model(pixel_values=UpperCamelCase__ , **UpperCamelCase__ ) with self.subTest("""JIT Enabled""" ): __magic_name__ = model_jitted(**UpperCamelCase__ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __magic_name__ = model_jitted(**UpperCamelCase__ ).to_tuple() 
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for jitted_output, output in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def a__ ( ): '''simple docstring''' __magic_name__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_flax class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None @slow def _lowercase ( self : Optional[Any] ) -> Tuple: """simple docstring""" __magic_name__ = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" ) __magic_name__ = self.default_image_processor __magic_name__ = prepare_img() __magic_name__ = image_processor(images=UpperCamelCase__ , return_tensors="""np""" ) __magic_name__ = model(**UpperCamelCase__ ) # verify the logits __magic_name__ = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) __magic_name__ = jnp.array([-0.4180, -1.5051, -3.4836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
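# Forward-pass sketch mirroring create_and_check_model above, with randomly
# initialised weights so no checkpoint download is needed; the channels-first
# input layout and the expected output shape are taken from the test itself.
import numpy as np
from transformers import RegNetConfig
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetModel

config = RegNetConfig(
    num_channels=3, embeddings_size=10, hidden_sizes=[10, 20, 30, 40], depths=[1, 1, 2, 1]
)
model = FlaxRegNetModel(config)
pixel_values = np.random.rand(1, 3, 32, 32).astype("float32")
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (1, 40, 1, 1) per the shape check above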
88
"""Project Euler 8: greatest product of thirteen adjacent digits in the 1000-digit number."""
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen consecutive digits of n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
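# Equivalent loop form of solution() for readers who find reduce() opaque;
# it computes exactly the same sliding-window maximum.
def solution_loop(n: str = N) -> int:
    best = 0
    for i in range(len(n) - 12):
        product = 1
        for digit in n[i : i + 13]:
            product *= int(digit)
        best = max(best, product)
    return best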
314
0
"""simple docstring""" import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''', datefmt='''%Y-%m-%d %H:%M:%S''', level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(), stream=sys.stdout, ) lowerCAmelCase__ = logging.getLogger(__name__) lowerCAmelCase__ = {'''facebook/bart-base''': BartForConditionalGeneration} lowerCAmelCase__ = {'''facebook/bart-base''': BartTokenizer} def a__ ( ): """simple docstring""" UpperCamelCase = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." ) parser.add_argument( "--validation_file" , type=__snake_case , default=__snake_case , help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length" , type=__snake_case , default=5 , help="The maximum total input sequence length after tokenization." , ) parser.add_argument( "--num_beams" , type=__snake_case , default=__snake_case , help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ) , ) parser.add_argument( "--model_name_or_path" , type=__snake_case , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__snake_case , ) parser.add_argument( "--config_name" , type=__snake_case , default=__snake_case , help="Pretrained config name or path if not the same as model_name" , ) parser.add_argument( "--device" , type=__snake_case , default="cpu" , help="Device where the model will be run" , ) parser.add_argument("--output_file_path" , type=__snake_case , default=__snake_case , help="Where to store the final ONNX file." ) UpperCamelCase = parser.parse_args() return args def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="cpu" ): """simple docstring""" UpperCamelCase = model_dict[model_name].from_pretrained(__snake_case ).to(__snake_case ) UpperCamelCase = tokenizer_dict[model_name].from_pretrained(__snake_case ) if model_name in ["facebook/bart-base"]: UpperCamelCase = 0 UpperCamelCase = None UpperCamelCase = 0 return huggingface_model, tokenizer def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" model.eval() UpperCamelCase = None UpperCamelCase = torch.jit.script(BARTBeamSearchGenerator(__snake_case ) ) with torch.no_grad(): UpperCamelCase = "My friends are cool but they eat too many carbs." 
UpperCamelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_024 , return_tensors="pt" ).to(model.device ) UpperCamelCase = model.generate( inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=__snake_case , max_length=__snake_case , early_stopping=__snake_case , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( __snake_case , ( inputs["input_ids"], inputs["attention_mask"], num_beams, max_length, model.config.decoder_start_token_id, ) , __snake_case , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={ "input_ids": {0: "batch", 1: "seq"}, "output_ids": {0: "batch", 1: "seq_out"}, } , example_outputs=__snake_case , ) logger.info("Model exported to {}".format(__snake_case ) ) UpperCamelCase = remove_dup_initializers(os.path.abspath(__snake_case ) ) logger.info("Deduplicated and optimized model written to {}".format(__snake_case ) ) UpperCamelCase = onnxruntime.InferenceSession(__snake_case ) UpperCamelCase = ort_sess.run( __snake_case , { "input_ids": inputs["input_ids"].cpu().numpy(), "attention_mask": inputs["attention_mask"].cpu().numpy(), "num_beams": np.array(__snake_case ), "max_length": np.array(__snake_case ), "decoder_start_token_id": np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 ) logger.info("Model outputs from torch and ONNX Runtime are similar." ) logger.info("Success." ) def a__ ( ): """simple docstring""" UpperCamelCase = parse_args() UpperCamelCase = 5 UpperCamelCase = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() UpperCamelCase = torch.device(args.device ) UpperCamelCase , UpperCamelCase = load_model_tokenizer(args.model_name_or_path , __snake_case ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" ) model.to(__snake_case ) if args.max_length: UpperCamelCase = args.max_length if args.num_beams: UpperCamelCase = args.num_beams if args.output_file_path: UpperCamelCase = args.output_file_path else: UpperCamelCase = "BART.onnx" logger.info("Exporting model to ONNX" ) export_and_validate_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) if __name__ == "__main__": main()
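# Typical invocation of the export script above (the script filename and paths
# are illustrative, not pinned by the record):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path BART.onnx
#
# The exported graph can then be sanity-checked without rerunning the script:
import onnxruntime

sess = onnxruntime.InferenceSession("BART.onnx")
print([inp.name for inp in sess.get_inputs()])  # input_ids, attention_mask, num_beams, ...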
362
"""simple docstring""" from statistics import mean, stdev def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 3 ): """simple docstring""" UpperCamelCase = min(_SCREAMING_SNAKE_CASE ) UpperCamelCase = max(_SCREAMING_SNAKE_CASE ) # normalize data return [round((x - x_min) / (x_max - x_min) , _SCREAMING_SNAKE_CASE ) for x in data] def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 3 ): """simple docstring""" UpperCamelCase = mean(_SCREAMING_SNAKE_CASE ) UpperCamelCase = stdev(_SCREAMING_SNAKE_CASE ) # standardize data return [round((x - mu) / (sigma) , _SCREAMING_SNAKE_CASE ) for x in data]
244
0
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def __SCREAMING_SNAKE_CASE ( A_ ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4_e_0_0 and cp <= 0X9_f_f_f) or (cp >= 0X3_4_0_0 and cp <= 0X4_d_b_f) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_a_6_d_f) # or (cp >= 0X2_a_7_0_0 and cp <= 0X2_b_7_3_f) # or (cp >= 0X2_b_7_4_0 and cp <= 0X2_b_8_1_f) # or (cp >= 0X2_b_8_2_0 and cp <= 0X2_c_e_a_f) # or (cp >= 0Xf_9_0_0 and cp <= 0Xf_a_f_f) or (cp >= 0X2_f_8_0_0 and cp <= 0X2_f_a_1_f) # ): # return True return False def __SCREAMING_SNAKE_CASE ( A_ ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase__ : Tuple = ord(A_ ) if not _is_chinese_char(A_ ): return 0 return 1 def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : str = set() for token in tokens: lowerCAmelCase__ : str = len(A_ ) > 1 and is_chinese(A_ ) if chinese_word: word_set.add(A_ ) lowerCAmelCase__ : str = list(A_ ) return word_list def __SCREAMING_SNAKE_CASE ( A_ , A_ ): if not chinese_word_set: return bert_tokens lowerCAmelCase__ : int = max([len(A_ ) for w in chinese_word_set] ) lowerCAmelCase__ : Tuple = bert_tokens lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = 0, len(A_ ) while start < end: lowerCAmelCase__ : int = True if is_chinese(bert_word[start] ): lowerCAmelCase__ : str = min(end - start , A_ ) for i in range(A_ , 1 , -1 ): lowerCAmelCase__ : Dict = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase__ : Any = '''##''' + bert_word[j] lowerCAmelCase__ : str = start + i lowerCAmelCase__ : List[Any] = False break if single_word: start += 1 return bert_word def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): lowerCAmelCase__ : Dict = [] for i in range(0 , len(A_ ) , 1_00 ): lowerCAmelCase__ : List[str] = ltp_tokenizer.seg(lines[i : i + 1_00] )[0] lowerCAmelCase__ : Union[str, Any] = [get_chinese_word(A_ ) for r in res] ltp_res.extend(A_ ) assert len(A_ ) == len(A_ ) lowerCAmelCase__ : Union[str, Any] = [] for i in range(0 , len(A_ ) , 1_00 ): lowerCAmelCase__ : Tuple = bert_tokenizer(lines[i : i + 1_00] , add_special_tokens=A_ , truncation=A_ , max_length=5_12 ) bert_res.extend(res['''input_ids'''] ) assert len(A_ ) == len(A_ ) lowerCAmelCase__ : List[str] = [] for input_ids, chinese_word in zip(A_ , A_ ): lowerCAmelCase__ : List[Any] = [] for id in input_ids: lowerCAmelCase__ : Optional[Any] = bert_tokenizer._convert_id_to_token(A_ ) input_tokens.append(A_ ) lowerCAmelCase__ : Any = add_sub_symbol(A_ , A_ ) lowerCAmelCase__ : Dict = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(A_ ): if token[:2] == "##": lowerCAmelCase__ : List[str] = token[2:] # save chinese tokens' pos if len(A_ ) == 1 and _is_chinese_char(ord(A_ ) ): ref_id.append(A_ ) ref_ids.append(A_ ) assert len(A_ ) == len(A_ ) return ref_ids def __SCREAMING_SNAKE_CASE ( A_ ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase__ : List[str] = f.readlines() lowerCAmelCase__ : Union[str, Any] = [line.strip() for line in data if len(A_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase__ : int = LTP(args.ltp ) # faster in GPU device lowerCAmelCase__ : str = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase__ : List[Any] = prepare_ref(A_ , A_ , A_ ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase__ : str = [json.dumps(A_ ) + '''\n''' for ref in ref_ids] f.writelines(A_ ) if __name__ == "__main__": __UpperCamelCase : List[str] = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') __UpperCamelCase : Optional[Any] = parser.parse_args() main(args)
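# Toy illustration of the whole-word marking helper above (upstream name
# add_sub_symbol; the dump reuses one obfuscated identifier for every function
# in this file, so the name is an assumption). Characters that continue a
# matched Chinese word get the "##" prefix used by whole-word masking.
bert_tokens = ["身", "高", "180", "的", "神"]
print(add_sub_symbol(bert_tokens, {"身高"}))  # ['身', '##高', '180', '的', '神']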
106
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : List[Any] ,lowercase_ : Tuple ,lowercase_ : Dict ,lowercase_ : str ): lowerCAmelCase__ : int = dataset lowerCAmelCase__ : List[str] = process lowerCAmelCase__ : Dict = params def __len__( self : Any ): return len(self.dataset ) def __getitem__( self : Union[str, Any] ,lowercase_ : List[Any] ): lowerCAmelCase__ : Union[str, Any] = self.dataset[i] lowerCAmelCase__ : Optional[Any] = self.process(lowercase_ ,**self.params ) return processed class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : Optional[int] ,lowercase_ : Optional[Any] ,lowercase_ : List[Any] ,lowercase_ : Optional[Any] ,lowercase_ : Tuple=None ): lowerCAmelCase__ : List[Any] = loader lowerCAmelCase__ : int = infer lowerCAmelCase__ : List[str] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether lowerCAmelCase__ : int = None lowerCAmelCase__ : Dict = loader_batch_size # Internal bookkeeping lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : Optional[int] = None def __len__( self : Union[str, Any] ): return len(self.loader ) def __iter__( self : List[Any] ): lowerCAmelCase__ : List[Any] = iter(self.loader ) return self def __lowerCAmelCase ( self : Tuple ): if isinstance(self._loader_batch_data ,torch.Tensor ): # Batch data is simple tensor, just fetch the slice lowerCAmelCase__ : Tuple = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) lowerCAmelCase__ : int = {} for k, element in self._loader_batch_data.items(): if isinstance(lowercase_ ,lowercase_ ): # Convert ModelOutput to tuple first lowerCAmelCase__ : List[Any] = element.to_tuple() if isinstance(element[0] ,torch.Tensor ): lowerCAmelCase__ : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): lowerCAmelCase__ : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowercase_ ,lowercase_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] ,torch.Tensor ): lowerCAmelCase__ : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] ,np.ndarray ): lowerCAmelCase__ : Optional[int] = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around lowerCAmelCase__ : Dict = None elif isinstance(element[self._loader_batch_index] ,torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCAmelCase__ : str = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] ,np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers lowerCAmelCase__ : Tuple = np.expand_dims(element[self._loader_batch_index] ,0 ) else: # This is typically a list, so no need to `unsqueeze`. 
lowerCAmelCase__ : int = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 lowerCAmelCase__ : int = self._loader_batch_data.__class__(lowercase_ ) self._loader_batch_index += 1 return result def __lowerCAmelCase ( self : Optional[int] ): if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch lowerCAmelCase__ : Dict = next(self.iterator ) lowerCAmelCase__ : List[Any] = self.infer(lowercase_ ,**self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowercase_ ,torch.Tensor ): lowerCAmelCase__ : int = processed else: lowerCAmelCase__ : Union[str, Any] = list(processed.keys() )[0] lowerCAmelCase__ : Union[str, Any] = processed[key] if isinstance(lowercase_ ,lowercase_ ): lowerCAmelCase__ : List[Any] = len(lowercase_ ) else: lowerCAmelCase__ : List[str] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCAmelCase__ : Optional[Any] = observed_batch_size # Setting internal index to unwrap the batch lowerCAmelCase__ : str = processed lowerCAmelCase__ : Any = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : int ,lowercase_ : str ,lowercase_ : str ,lowercase_ : Union[str, Any] ,lowercase_ : int=None ): super().__init__(lowercase_ ,lowercase_ ,lowercase_ ) def __iter__( self : List[Any] ): lowerCAmelCase__ : Dict = iter(self.loader ) lowerCAmelCase__ : Tuple = None return self def __lowerCAmelCase ( self : Optional[int] ): if self.subiterator is None: lowerCAmelCase__ : List[Any] = self.infer(next(self.iterator ) ,**self.params ) try: # Try to return next item lowerCAmelCase__ : Optional[int] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators lowerCAmelCase__ : Any = self.infer(next(self.iterator ) ,**self.params ) lowerCAmelCase__ : int = next(self.subiterator ) return processed class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __iter__( self : Tuple ): lowerCAmelCase__ : int = iter(self.loader ) return self def __lowerCAmelCase ( self : List[Any] ): # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
lowerCAmelCase__ : Tuple = False lowerCAmelCase__ : str = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: lowerCAmelCase__ : Dict = self.loader_batch_item() lowerCAmelCase__ : Optional[Any] = item.pop('''is_last''' ) accumulator.append(lowercase_ ) if is_last: return accumulator while not is_last: lowerCAmelCase__ : Any = self.infer(next(self.iterator ) ,**self.params ) if self.loader_batch_size is not None: if isinstance(lowercase_ ,torch.Tensor ): lowerCAmelCase__ : Tuple = processed else: lowerCAmelCase__ : List[Any] = list(processed.keys() )[0] lowerCAmelCase__ : Union[str, Any] = processed[key] if isinstance(lowercase_ ,lowercase_ ): lowerCAmelCase__ : Tuple = len(lowercase_ ) else: lowerCAmelCase__ : str = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. lowerCAmelCase__ : Optional[int] = observed_batch_size lowerCAmelCase__ : Optional[int] = processed lowerCAmelCase__ : Optional[int] = 0 while self._loader_batch_index < self.loader_batch_size: lowerCAmelCase__ : Any = self.loader_batch_item() lowerCAmelCase__ : Optional[Any] = item.pop('''is_last''' ) accumulator.append(lowercase_ ) if is_last: return accumulator else: lowerCAmelCase__ : Dict = processed lowerCAmelCase__ : Tuple = item.pop('''is_last''' ) accumulator.append(lowercase_ ) return accumulator class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : int ,lowercase_ : Dataset ,lowercase_ : str ): lowerCAmelCase__ : List[Any] = dataset lowerCAmelCase__ : List[Any] = key def __len__( self : List[Any] ): return len(self.dataset ) def __getitem__( self : str ,lowercase_ : Union[str, Any] ): return self.dataset[i][self.key] class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" def __init__( self : Dict ,lowercase_ : Dataset ,lowercase_ : str ,lowercase_ : str ): lowerCAmelCase__ : str = dataset lowerCAmelCase__ : List[str] = keya lowerCAmelCase__ : Optional[Any] = keya def __len__( self : str ): return len(self.dataset ) def __getitem__( self : Optional[int] ,lowercase_ : Union[str, Any] ): return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
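# Hedged sketch using the upstream names of the first two classes above
# (PipelineDataset and PipelineIterator from transformers.pipelines.pt_utils);
# the record's class names are obfuscated, so those names are assumptions.
from torch.utils.data import DataLoader

ds = PipelineDataset([1, 2, 3], process=lambda x, **kw: {"value": x * 2}, params={})
loader = DataLoader(ds, batch_size=1, collate_fn=lambda batch: batch[0])
for out in PipelineIterator(loader, infer=lambda x, **kw: x, params={}):
    print(out)  # {'value': 2}, then {'value': 4}, then {'value': 6}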
106
1
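# A minimal, self-contained sketch of the batch-unrolling idea the iterators
# above implement: given an iterator over batched tensor dicts, yield one
# batch_size=1 slice at a time. The name `unroll_batches` is illustrative
# only, not a transformers API.
import torch

def unroll_batches(batched_outputs):
    """Yield batch_size=1 slices from an iterator of batched tensor dicts."""
    for batch in batched_outputs:
        first_tensor = next(iter(batch.values()))
        observed_batch_size = first_tensor.shape[0]  # may be smaller on the last batch
        for i in range(observed_batch_size):
            # Recreate a batch_size=1 element, keeping the batch dimension.
            yield {key: value[i].unsqueeze(0) for key, value in batch.items()}

batches = iter([{"logits": torch.zeros(4, 2)}, {"logits": torch.ones(2, 2)}])
assert sum(1 for _ in unroll_batches(batches)) == 6  # 4 + 2 unrolled items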
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __a = logging.get_logger(__name__) __a = {'vocab_file': 'spm_char.model'} __a = { 'vocab_file': { 'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model', 'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model', 'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model', } } __a = { 'microsoft/speecht5_asr': 1_024, 'microsoft/speecht5_tts': 1_024, 'microsoft/speecht5_vc': 1_024, } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Any = VOCAB_FILES_NAMES UpperCamelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase_ : List[str] = ['''input_ids''', '''attention_mask'''] def __init__( self : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[Any]="<s>" , lowerCAmelCase__ : int="</s>" , lowerCAmelCase__ : Any="<unk>" , lowerCAmelCase__ : int="<pad>" , lowerCAmelCase__ : Optional[Dict[str, Any]] = None , **lowerCAmelCase__ : List[Any] , ) -> str: """simple docstring""" _UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , ) _UpperCAmelCase : Tuple = vocab_file _UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_lowerCAmelCase ) @property def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]: """simple docstring""" return self.sp_model.get_piece_size() def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : int = {self.convert_ids_to_tokens(_lowerCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Dict ) -> Dict: """simple docstring""" _UpperCAmelCase : Dict = self.__dict__.copy() _UpperCAmelCase : Optional[int] = None return state def __setstate__( self : Union[str, Any] , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Any = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): _UpperCAmelCase : List[str] = {} _UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : str ) -> Union[str, Any]: """simple docstring""" return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase ) def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] ) -> Optional[int]: """simple docstring""" return self.sp_model.piece_to_id(_lowerCAmelCase ) def _lowerCAmelCase ( self : int , lowerCAmelCase__ : int ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = self.sp_model.IdToPiece(_lowerCAmelCase ) return token def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : List[str] ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : List[str] = [] _UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += 
self.sp_model.decode(_lowerCAmelCase ) + token _UpperCAmelCase : str = [] else: current_sub_tokens.append(_lowerCAmelCase ) out_string += self.sp_model.decode(_lowerCAmelCase ) return out_string.strip() def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any=None ) -> Union[str, Any]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def _lowerCAmelCase ( self : str , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ) -> Union[str, Any]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase ) _UpperCAmelCase : str = [1] if token_ids_a is None: return ([0] * len(_lowerCAmelCase )) + suffix_ones return ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ) -> List[Any]: """simple docstring""" if not os.path.isdir(_lowerCAmelCase ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return _UpperCAmelCase : List[Any] = os.path.join( _lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _lowerCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(_lowerCAmelCase , "wb" ) as fi: _UpperCAmelCase : int = self.sp_model.serialized_model_proto() fi.write(_lowerCAmelCase ) return (out_vocab_file,)
353
'''simple docstring''' import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Optional[int] = (EulerDiscreteScheduler,) UpperCamelCase_ : Tuple = 10 def _lowerCAmelCase ( self : Dict , **lowerCAmelCase__ : Tuple ) -> Any: """simple docstring""" _UpperCAmelCase : str = { "num_train_timesteps": 1_1_0_0, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Any ) -> List[str]: """simple docstring""" for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[str] ) -> List[str]: """simple docstring""" for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[str] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Optional[int] = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Any = self.dummy_model() _UpperCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : List[Any] = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = output.prev_sample _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Tuple = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config(prediction_type="v_prediction" ) _UpperCAmelCase : Any = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = self.dummy_model() _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma _UpperCAmelCase : Tuple = sample.to(lowerCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase : Union[str, Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = output.prev_sample 
_UpperCAmelCase : Tuple = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Any = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 0.0002 ) < 1e-2 assert abs(result_mean.item() - 2.26_76e-06 ) < 1e-3 def _lowerCAmelCase ( self : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : Optional[int] = self.scheduler_classes[0] _UpperCAmelCase : List[Any] = self.get_scheduler_config() _UpperCAmelCase : int = scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : str = self.dummy_model() _UpperCAmelCase : Any = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : str = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[str] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Any = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : int = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : str = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 10.0807 ) < 1e-2 assert abs(result_mean.item() - 0.0131 ) < 1e-3 def _lowerCAmelCase ( self : List[str] ) -> int: """simple docstring""" _UpperCAmelCase : List[Any] = self.scheduler_classes[0] _UpperCAmelCase : int = self.get_scheduler_config() _UpperCAmelCase : Union[str, Any] = scheduler_class(**lowerCAmelCase__ , use_karras_sigmas=lowerCAmelCase__ ) scheduler.set_timesteps(self.num_inference_steps , device=lowerCAmelCase__ ) _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = self.dummy_model() _UpperCAmelCase : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() _UpperCAmelCase : Optional[int] = sample.to(lowerCAmelCase__ ) for t in scheduler.timesteps: _UpperCAmelCase : List[Any] = scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = model(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , generator=lowerCAmelCase__ ) _UpperCAmelCase : List[Any] = output.prev_sample _UpperCAmelCase : List[Any] = torch.sum(torch.abs(lowerCAmelCase__ ) ) _UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1e-2 assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1e-3
17
0
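# A pure-Python sketch of the special-token-aware decoding loop in the
# tokenizer above: sub-word pieces are buffered and decoded together, while
# special tokens pass through verbatim. The lambda below stands in for
# SentencePiece's detokenizer and is an assumption for illustration only.
def pieces_to_string(tokens, special_tokens, decode=lambda ps: "".join(ps).replace("▁", " ")):
    current_sub_tokens, out_string = [], ""
    for token in tokens:
        if token in special_tokens:
            out_string += decode(current_sub_tokens) + token
            current_sub_tokens = []
        else:
            current_sub_tokens.append(token)
    out_string += decode(current_sub_tokens)
    return out_string.strip()

assert pieces_to_string(["▁h", "i", "</s>"], {"</s>"}) == "hi</s>"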
"""simple docstring""" a = [ '''Audio''', '''Array2D''', '''Array3D''', '''Array4D''', '''Array5D''', '''ClassLabel''', '''Features''', '''Sequence''', '''Value''', '''Image''', '''Translation''', '''TranslationVariableLanguages''', ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
315
"""simple docstring""" import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(__lowerCAmelCase ) , '''Tatoeba directory does not exist.''' ) class lowercase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self : Optional[Any] ): _A = tempfile.mkdtemp() return TatoebaConverter(save_dir=_UpperCAmelCase ) @slow def lowerCAmelCase_ ( self : Optional[int] ): self.resolver.convert_models(['heb-eng'] ) @slow def lowerCAmelCase_ ( self : Optional[Any] ): _A , _A = self.resolver.write_model_card('opus-mt-he-en' , dry_run=_UpperCAmelCase ) assert mmeta["long_pair"] == "heb-eng"
315
1
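# A short usage sketch for the feature types re-exported above: declaring a
# typed schema for a small in-memory dataset (requires the `datasets` library).
from datasets import ClassLabel, Dataset, Features, Value

features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
ds = Dataset.from_dict({"text": ["great", "awful"], "label": [1, 0]}, features=features)
assert ds.features["label"].int2str(1) == "pos"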
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING UpperCAmelCase_ : List[Any] = logging.get_logger(__name__) @add_end_docstrings(_UpperCAmelCase ) class UpperCamelCase ( _UpperCAmelCase ): def __init__( self , **UpperCAmelCase__ ): super().__init__(**UpperCAmelCase__ ) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , "vision" ) self.check_model_type(UpperCAmelCase__ ) def __call__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , **UpperCAmelCase__ , ): if "text_queries" in kwargs: A__ = kwargs.pop("text_queries" ) if isinstance(UpperCAmelCase__ , (str, Image.Image) ): A__ = {"image": image, "candidate_labels": candidate_labels} else: A__ = image A__ = super().__call__(UpperCAmelCase__ , **UpperCAmelCase__ ) return results def __A ( self , **UpperCAmelCase__ ): A__ = {} if "threshold" in kwargs: A__ = kwargs["threshold"] if "top_k" in kwargs: A__ = kwargs["top_k"] return {}, {}, postprocess_params def __A ( self , UpperCAmelCase__ ): A__ = load_image(inputs["image"] ) A__ = inputs["candidate_labels"] if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): A__ = candidate_labels.split("," ) A__ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(UpperCAmelCase__ ): A__ = self.tokenizer(UpperCAmelCase__ , return_tensors=self.framework ) A__ = self.image_processor(UpperCAmelCase__ , return_tensors=self.framework ) yield { "is_last": i == len(UpperCAmelCase__ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def __A ( self , UpperCAmelCase__ ): A__ = model_inputs.pop("target_size" ) A__ = model_inputs.pop("candidate_label" ) A__ = model_inputs.pop("is_last" ) A__ = self.model(**UpperCAmelCase__ ) A__ = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs} return model_outputs def __A ( self , UpperCAmelCase__ , UpperCAmelCase__=0.1 , UpperCAmelCase__=None ): A__ = [] for model_output in model_outputs: A__ = model_output["candidate_label"] A__ = BaseModelOutput(UpperCAmelCase__ ) A__ = self.image_processor.post_process_object_detection( outputs=UpperCAmelCase__ , threshold=UpperCAmelCase__ , target_sizes=model_output["target_size"] )[0] for index in outputs["scores"].nonzero(): A__ = outputs["scores"][index].item() A__ = self._get_bounding_box(outputs["boxes"][index][0] ) A__ = {"score": score, "label": label, "box": box} results.append(UpperCAmelCase__ ) A__ = sorted(UpperCAmelCase__ , key=lambda UpperCAmelCase__ : x["score"] , reverse=UpperCAmelCase__ ) if top_k: A__ = results[:top_k] return results def __A ( self , UpperCAmelCase__ ): if self.framework != "pt": raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." ) A__ , A__ , A__ , A__ = box.int().tolist() A__ = { "xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax, } return bbox
198
import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class UpperCamelCase ( _UpperCAmelCase ): def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = None , UpperCAmelCase__ = False , UpperCAmelCase__ = False , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ): super().__init__( UpperCAmelCase__ , split=UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ , streaming=UpperCAmelCase__ , num_proc=UpperCAmelCase__ , **UpperCAmelCase__ , ) A__ = field A__ = path_or_paths if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else {self.split: path_or_paths} A__ = Json( cache_dir=UpperCAmelCase__ , data_files=UpperCAmelCase__ , features=UpperCAmelCase__ , field=UpperCAmelCase__ , **UpperCAmelCase__ , ) def __A ( self ): # Build iterable dataset if self.streaming: A__ = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: A__ = None A__ = None A__ = None A__ = None self.builder.download_and_prepare( download_config=UpperCAmelCase__ , download_mode=UpperCAmelCase__ , verification_mode=UpperCAmelCase__ , base_path=UpperCAmelCase__ , num_proc=self.num_proc , ) A__ = self.builder.as_dataset( split=self.split , verification_mode=UpperCAmelCase__ , in_memory=self.keep_in_memory ) return dataset class UpperCamelCase : def __init__( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = None , UpperCAmelCase__ = None , **UpperCAmelCase__ , ): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) A__ = dataset A__ = path_or_buf A__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE A__ = num_proc A__ = "utf-8" A__ = to_json_kwargs def __A ( self ): A__ = self.to_json_kwargs.pop("path_or_buf" , UpperCAmelCase__ ) A__ = self.to_json_kwargs.pop("orient" , "records" ) A__ = self.to_json_kwargs.pop("lines" , True if orient == "records" else False ) A__ = self.to_json_kwargs.pop("index" , False if orient in ["split", "table"] else True ) A__ = self.to_json_kwargs.pop("compression" , UpperCAmelCase__ ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(F"""`datasets` currently does not support {compression} compression""" ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , "wb" , compression=UpperCAmelCase__ ) as buffer: A__ = self._write(file_obj=UpperCAmelCase__ , orient=UpperCAmelCase__ , lines=UpperCAmelCase__ , index=UpperCAmelCase__ , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( F"""The compression parameter is not supported when writing to a buffer, but compression={compression}""" " was passed. Please provide a local path instead." 
) A__ = self._write( file_obj=self.path_or_buf , orient=UpperCAmelCase__ , lines=UpperCAmelCase__ , index=UpperCAmelCase__ , **self.to_json_kwargs ) return written def __A ( self , UpperCAmelCase__ ): A__ , A__ , A__ , A__ , A__ = args A__ = query_table( table=self.dataset.data , key=slice(UpperCAmelCase__ , offset + self.batch_size ) , indices=self.dataset._indices , ) A__ = batch.to_pandas().to_json( path_or_buf=UpperCAmelCase__ , orient=UpperCAmelCase__ , lines=UpperCAmelCase__ , index=UpperCAmelCase__ , **UpperCAmelCase__ ) if not json_str.endswith("\n" ): json_str += "\n" return json_str.encode(self.encoding ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ , ): A__ = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ): A__ = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(UpperCAmelCase__ ) else: A__ , A__ = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCAmelCase__ , UpperCAmelCase__ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating json from Arrow format" , ): written += file_obj.write(UpperCAmelCase__ ) return written
198
1
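# A hedged usage sketch for the zero-shot object detection pipeline above.
# The checkpoint below is the commonly used OWL-ViT model; running this
# requires network access to download it.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
for pred in predictions:
    print(pred["label"], round(pred["score"], 3), pred["box"])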
"""simple docstring""" import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple: lowercase__ : Optional[int] = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg''' lowercase__ : List[str] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert('''RGB''' ) lowercase__ : Any = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3) , (0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1) ), ] ) lowercase__ : Dict = transform(__lowerCamelCase ).unsqueeze(0 ).to(__lowerCamelCase ) return image def __UpperCAmelCase ( __lowerCamelCase ) -> List[Any]: if "visual_encoder" in key: lowercase__ : Any = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , __lowerCamelCase ) if "blocks" in key: lowercase__ : List[str] = re.sub(r'''blocks''' , '''layers''' , __lowerCamelCase ) if "attn" in key: lowercase__ : List[str] = re.sub(r'''attn''' , '''self_attn''' , __lowerCamelCase ) if "norm1" in key: lowercase__ : Dict = re.sub(r'''norm1''' , '''layer_norm1''' , __lowerCamelCase ) if "norm2" in key: lowercase__ : Optional[Any] = re.sub(r'''norm2''' , '''layer_norm2''' , __lowerCamelCase ) if "encoder.norm" in key: lowercase__ : Union[str, Any] = re.sub(r'''encoder.norm''' , '''post_layernorm''' , __lowerCamelCase ) if "encoder.patch_embed.proj" in key: lowercase__ : int = re.sub(r'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , __lowerCamelCase ) if "encoder.pos_embed" in key: lowercase__ : int = re.sub(r'''encoder.pos_embed''' , '''embeddings.position_embedding''' , __lowerCamelCase ) if "encoder.cls_token" in key: lowercase__ : Dict = re.sub(r'''encoder.cls_token''' , '''embeddings.class_embedding''' , __lowerCamelCase ) if "self_attn" in key: lowercase__ : Optional[int] = re.sub(r'''self_attn.proj''' , '''self_attn.projection''' , __lowerCamelCase ) return key @torch.no_grad() def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase=None ) -> Optional[int]: if config_path is not None: lowercase__ : Union[str, Any] = BlipConfig.from_pretrained(__lowerCamelCase ) else: lowercase__ : str = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} ) lowercase__ : Dict = BlipForConditionalGeneration(__lowerCamelCase ).eval() lowercase__ : Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth''' lowercase__ : Optional[int] = blip_decoder(pretrained=__lowerCamelCase , image_size=3_84 , vit='''base''' ) lowercase__ : Optional[int] = pt_model.eval() lowercase__ : Union[str, Any] = pt_model.state_dict() for key in modified_state_dict.copy(): lowercase__ : Any = modified_state_dict.pop(__lowerCamelCase ) lowercase__ : Union[str, Any] = rename_key(__lowerCamelCase ) lowercase__ : Union[str, Any] = value hf_model.load_state_dict(__lowerCamelCase ) lowercase__ : int = 3_84 lowercase__ : Optional[int] = 
load_demo_image(image_size=__lowerCamelCase , device='''cpu''' ) lowercase__ : str = BertTokenizer.from_pretrained('''bert-base-uncased''' ) lowercase__ : Dict = tokenizer(['''a picture of'''] ).input_ids lowercase__ : Tuple = hf_model.generate(__lowerCamelCase , __lowerCamelCase ) assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] lowercase__ : Tuple = hf_model.generate(__lowerCamelCase ) assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(__lowerCamelCase ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' lowercase__ : int = ( '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth''' ) lowercase__ : Optional[Any] = blip_vqa(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit='''base''' ) vqa_model.eval() lowercase__ : Optional[Any] = vqa_model.state_dict() for key in modified_state_dict.copy(): lowercase__ : str = modified_state_dict.pop(__lowerCamelCase ) lowercase__ : Optional[Any] = rename_key(__lowerCamelCase ) lowercase__ : List[Any] = value lowercase__ : Tuple = BlipForQuestionAnswering(__lowerCamelCase ) hf_vqa_model.load_state_dict(__lowerCamelCase ) lowercase__ : Union[str, Any] = ['''How many dogs are in this image?'''] lowercase__ : Tuple = tokenizer(__lowerCamelCase , return_tensors='''pt''' ).input_ids lowercase__ : Optional[int] = hf_vqa_model.generate(__lowerCamelCase , __lowerCamelCase ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' ) lowercase__ : Any = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth''' lowercase__ : str = blip_itm(pretrained=__lowerCamelCase , image_size=__lowerCamelCase , vit='''base''' ) itm_model.eval() lowercase__ : str = itm_model.state_dict() for key in modified_state_dict.copy(): lowercase__ : str = modified_state_dict.pop(__lowerCamelCase ) lowercase__ : List[Any] = rename_key(__lowerCamelCase ) lowercase__ : Union[str, Any] = value lowercase__ : Union[str, Any] = BlipForImageTextRetrieval(__lowerCamelCase ) lowercase__ : Any = ['''A picture of a woman with a dog sitting in a beach'''] lowercase__ : Optional[Any] = tokenizer( __lowerCamelCase , return_tensors='''pt''' , padding='''max_length''' , truncation=__lowerCamelCase , max_length=35 , ).input_ids hf_itm_model.load_state_dict(__lowerCamelCase ) hf_itm_model.eval() lowercase__ : Optional[Any] = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase ) lowercase__ : Any = hf_itm_model(__lowerCamelCase , __lowerCamelCase , use_itm_head=__lowerCamelCase ) assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') lowerCAmelCase_ = parser.parse_args() 
convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
16
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class lowerCAmelCase_ ( UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : torch.FloatTensor UpperCamelCase_ : Optional[torch.FloatTensor] = None class lowerCAmelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ ): '''simple docstring''' UpperCamelCase_ : Tuple = 2 @register_to_config def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : float = 0.02 , SCREAMING_SNAKE_CASE_ : float = 1_00 , SCREAMING_SNAKE_CASE_ : float = 1.007 , SCREAMING_SNAKE_CASE_ : float = 80 , SCREAMING_SNAKE_CASE_ : float = 0.05 , SCREAMING_SNAKE_CASE_ : float = 50 , ) -> Optional[int]: '''simple docstring''' A: Union[str, Any] = sigma_max # setable values A: int = None A: np.IntTensor = None A: torch.FloatTensor = None # sigma(t_i) def _snake_case ( self : str , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : Optional[int] = None ) -> torch.FloatTensor: '''simple docstring''' return sample def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, torch.device] = None ) -> Optional[Any]: '''simple docstring''' A: List[Any] = num_inference_steps A: List[str] = np.arange(0 , self.num_inference_steps )[::-1].copy() A: Any = torch.from_numpy(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ ) A: str = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] A: Tuple = torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: A: str = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: A: List[str] = 0 # sample eps ~ N(0, S_noise^2 * I) A: Optional[Any] = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE_ ).to(sample.device ) A: Optional[Any] = sigma + gamma * sigma A: List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple docstring''' A: Union[str, Any] = sample_hat + sigma_hat * model_output A: str = (sample_hat - pred_original_sample) / sigma_hat A: Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : bool = True , ) -> Union[KarrasVeOutput, Tuple]: '''simple 
docstring''' A: int = sample_prev + sigma_prev * model_output A: List[Any] = (sample_prev - pred_original_sample) / sigma_prev A: Dict = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=SCREAMING_SNAKE_CASE_ , derivative=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ ) def _snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Dict: '''simple docstring''' raise NotImplementedError()
319
0
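# A minimal demonstration of the regex-based state-dict key renaming used by
# the BLIP conversion script above, applied to a toy state dict (the keys
# below are made up for illustration).
import re

def demo_rename_key(key):
    key = re.sub(r"blocks", "layers", key)
    key = re.sub(r"attn", "self_attn", key)
    return key

toy_state_dict = {"blocks.0.attn.qkv.weight": 0, "blocks.0.norm1.weight": 1}
renamed = {demo_rename_key(k): v for k, v in toy_state_dict.items()}
assert "layers.0.self_attn.qkv.weight" in renamed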
"""simple docstring""" def lowercase_ ( _lowerCamelCase: list ) -> list: '''simple docstring''' def merge(_lowerCamelCase: list , _lowerCamelCase: list ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(_lowerCamelCase ) <= 1: return collection __lowerCamelCase : Union[str, Any] = len(_lowerCamelCase ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() __A = input('''Enter numbers separated by a comma:\n''').strip() __A = [int(item) for item in user_input.split(''',''')] print(*merge_sort(unsorted), sep=''',''')
64
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def lowercase_ ( _lowerCamelCase: Tuple , _lowerCamelCase: Dict=False ) -> Any: '''simple docstring''' __lowerCamelCase : Any = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __lowerCamelCase : str = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: int , _lowerCamelCase: List[str]=False ) -> Union[str, Any]: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: __lowerCamelCase : Any = "" else: __lowerCamelCase : Optional[int] = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowerCamelCase : Optional[int] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" ) __lowerCamelCase : List[str] = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowerCamelCase : Union[str, Any] = in_proj_weight[ : config.hidden_size, : ] __lowerCamelCase : str = in_proj_bias[: config.hidden_size] __lowerCamelCase : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowerCamelCase : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowerCamelCase : Any = in_proj_weight[ -config.hidden_size :, : ] __lowerCamelCase : str = in_proj_bias[-config.hidden_size :] def lowercase_ ( _lowerCamelCase: int ) -> List[Any]: '''simple docstring''' __lowerCamelCase : Tuple = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def lowercase_ ( _lowerCamelCase: Tuple ) -> List[str]: '''simple docstring''' __lowerCamelCase : List[Any] = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: List[str] , _lowerCamelCase: Optional[int] ) -> Any: '''simple docstring''' __lowerCamelCase : str = dct.pop(_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = val def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Tuple ) -> List[Any]: '''simple docstring''' __lowerCamelCase : int = ViTMSNConfig() __lowerCamelCase : Dict = 1000 __lowerCamelCase : str = "datasets/huggingface/label-files" __lowerCamelCase : Optional[int] = "imagenet-1k-id2label.json" __lowerCamelCase : List[str] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase ) , "r" ) ) __lowerCamelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()} __lowerCamelCase : int = idalabel __lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: __lowerCamelCase : int = 384 __lowerCamelCase : Optional[int] = 1536 __lowerCamelCase : str = 6 elif "l16" in checkpoint_url: __lowerCamelCase : Optional[Any] = 1024 __lowerCamelCase : str = 4096 __lowerCamelCase : Any = 24 __lowerCamelCase : Optional[int] = 16 __lowerCamelCase : Union[str, Any] = 0.1 elif "b4" in checkpoint_url: __lowerCamelCase : Optional[Any] = 4 elif "l7" in checkpoint_url: __lowerCamelCase : str = 7 __lowerCamelCase : int = 1024 __lowerCamelCase : int = 4096 __lowerCamelCase : Union[str, Any] = 24 __lowerCamelCase : Optional[int] = 16 __lowerCamelCase : List[Any] = 0.1 __lowerCamelCase : str = ViTMSNModel(_lowerCamelCase ) __lowerCamelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )["target_encoder"] __lowerCamelCase : Any = ViTImageProcessor(size=config.image_size ) remove_projection_head(_lowerCamelCase ) __lowerCamelCase : Tuple = create_rename_keys(_lowerCamelCase , base_model=_lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) 
read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , base_model=_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) model.eval() __lowerCamelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg" __lowerCamelCase : Tuple = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) __lowerCamelCase : List[str] = ViTImageProcessor( size=config.image_size , image_mean=_lowerCamelCase , image_std=_lowerCamelCase ) __lowerCamelCase : Tuple = image_processor(images=_lowerCamelCase , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) __lowerCamelCase : Optional[int] = model(**_lowerCamelCase ) __lowerCamelCase : List[str] = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: __lowerCamelCase : Any = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: __lowerCamelCase : Optional[Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: __lowerCamelCase : List[str] = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: __lowerCamelCase : str = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: __lowerCamelCase : Optional[int] = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , _lowerCamelCase , atol=1E-4 ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __A = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
64
1
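# Quick sanity checks for the merge sort above (assuming merge_sort is in
# scope); the implementation is stable and runs in O(n log n).
assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []
assert merge_sort([7]) == [7]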
'''simple docstring''' from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('repo_id' , ['canonical_dataset_name', 'org-name/dataset-name'] ) @pytest.mark.parametrize('path' , ['filename.csv', 'filename with blanks.csv'] ) @pytest.mark.parametrize('revision' , [None, 'v2'] ) def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): UpperCAmelCase : Optional[Any] = hf_hub_url(repo_id=UpperCAmelCase_ , path=UpperCAmelCase_ , revision=UpperCAmelCase_ ) assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCAmelCase_ )}"""
151
'''simple docstring''' from __future__ import annotations def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ): if len(UpperCAmelCase_ ) < k or k < 0: raise ValueError('Invalid Input' ) UpperCAmelCase : Tuple = sum(array[:k] ) for i in range(len(UpperCAmelCase_ ) - k ): UpperCAmelCase : Optional[Any] = current_sum - array[i] + array[i + k] UpperCAmelCase : List[Any] = max(UpperCAmelCase_ , UpperCAmelCase_ ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() lowercase__ = [randint(-1000, 1000) for i in range(100)] lowercase__ = randint(0, 110) print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
151
1
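# The sliding window above updates a running sum in O(1) per step instead of
# re-summing each window; a couple of sanity checks, assuming max_sum_in_array
# is in scope.
assert max_sum_in_array([1, 2, 3, 4, 5], 2) == 9   # best window is [4, 5]
assert max_sum_in_array([5, -1, -1, 5], 2) == 4    # window sums: 4, -2, 4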
'''simple docstring''' from __future__ import annotations class SCREAMING_SNAKE_CASE : def __init__( self , _UpperCAmelCase): '''simple docstring''' __A : Optional[Any] = data __A : Node | None = None __A : Node | None = None def _lowerCAmelCase ( __snake_case : Node | None ) -> None: # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def _lowerCAmelCase ( __snake_case : Node | None ) -> int: return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def _lowerCAmelCase ( __snake_case : Node ) -> bool: if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def _lowerCAmelCase ( ) -> None: # Main function for testing. __A : List[str] = Node(1 ) __A : Optional[Any] = Node(2 ) __A : Dict = Node(3 ) __A : List[str] = Node(4 ) __A : Optional[Any] = Node(5 ) __A : List[str] = Node(6 ) __A : Dict = Node(7 ) __A : Optional[Any] = Node(8 ) __A : List[str] = Node(9 ) print(is_full_binary_tree(__snake_case ) ) print(depth_of_tree(__snake_case ) ) print('Tree is: ' ) display(__snake_case ) if __name__ == "__main__": main()
190
'''simple docstring''' import math def _lowerCAmelCase ( __snake_case : int ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _lowerCAmelCase ( __snake_case : float = 0.1 ) -> int: __A : Dict = 3 __A : int = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(__snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
190
1
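# Sanity checks for the 6k +/- 1 primality test above (assuming is_prime is in
# scope); trial division only needs to run up to sqrt(n).
assert [n for n in range(2, 20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
assert not is_prime(1) and not is_prime(25)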
'''simple docstring''' from __future__ import annotations from typing import Any class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = num_of_nodes SCREAMING_SNAKE_CASE : list[list[int]] = [] SCREAMING_SNAKE_CASE : dict[int, int] = {} def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: SCREAMING_SNAKE_CASE : str = self.find_component(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : list[int] , lowerCamelCase_ : int , lowerCamelCase_ : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: SCREAMING_SNAKE_CASE : Any = v_node component_size[v_node] += component_size[u_node] self.set_component(lowerCamelCase_ ) elif component_size[u_node] >= component_size[v_node]: SCREAMING_SNAKE_CASE : str = self.find_component(lowerCamelCase_ ) component_size[u_node] += component_size[v_node] self.set_component(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = 0 SCREAMING_SNAKE_CASE : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) SCREAMING_SNAKE_CASE : str = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = edge SCREAMING_SNAKE_CASE : Any = self.m_component[u] SCREAMING_SNAKE_CASE : Optional[Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): SCREAMING_SNAKE_CASE : Optional[Any] = [u, v, w] for edge in minimum_weight_edge: if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = edge SCREAMING_SNAKE_CASE : Optional[Any] = self.m_component[u] SCREAMING_SNAKE_CASE : Dict = self.m_component[v] if u_component != v_component: mst_weight += w self.union(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' ) num_of_components -= 1 SCREAMING_SNAKE_CASE : List[Any] = [-1] * self.m_num_of_nodes print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' ) def __A ( ): """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
323
'''simple docstring''' import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class UpperCamelCase__ ( tf.keras.layers.Layer ): """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : Dict[str, int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : int = None , lowerCamelCase_ : int = None ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : Any = pad_token_id SCREAMING_SNAKE_CASE : List[Any] = max_length SCREAMING_SNAKE_CASE : Optional[int] = vocab SCREAMING_SNAKE_CASE : List[Any] = merges SCREAMING_SNAKE_CASE : Tuple = BytePairTokenizer(lowerCamelCase_ , lowerCamelCase_ , sequence_length=lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : Any , lowerCamelCase_ : GPTaTokenizer , *lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [""" """.join(lowerCamelCase_ ) for m in tokenizer.bpe_ranks.keys()] SCREAMING_SNAKE_CASE : List[str] = tokenizer.get_vocab() return cls(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[Any] , lowerCamelCase_ : Union[str, os.PathLike] , *lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = GPTaTokenizer.from_pretrained(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) return cls.from_tokenizer(lowerCamelCase_ , *lowerCamelCase_ , **lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : List[str] , lowerCamelCase_ : Tuple ): '''simple docstring''' return cls(**lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : int = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.tf_tokenizer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tf.ones_like(lowerCamelCase_ ) if self.pad_token_id is not None: # pad the tokens up to max length SCREAMING_SNAKE_CASE : Optional[int] = max_length if max_length is not None else self.max_length if max_length is not None: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = pad_model_inputs( lowerCamelCase_ , max_seq_length=lowerCamelCase_ , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
323
1
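# The Boruvka-style MST class above relies on a component map with chained
# lookups; here is the core "find" idea in isolation. This is a generic
# sketch, not the class's exact API (its method names are mangled in this
# sample).
def find_component(component, u):
    while component[u] != u:
        u = component[u]
    return u

component = {0: 0, 1: 0, 2: 1, 3: 3}
assert find_component(component, 2) == 0  # 2 -> 1 -> 0
assert find_component(component, 3) == 3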
'''simple docstring''' import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase_ , unittest.TestCase ): A_ = KandinskyInpaintPipeline A_ = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"] A_ = [ "prompt", "negative_prompt", "image_embeds", "negative_image_embeds", "image", "mask_image", ] A_ = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] A_ = False @property def __UpperCAmelCase ( self ): '''simple docstring''' return 32 @property def __UpperCAmelCase ( self ): '''simple docstring''' return 32 @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCAmelCase ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCAmelCase ( self ): '''simple docstring''' return 100 @property def __UpperCAmelCase ( self ): '''simple docstring''' __a : Dict = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' ) return tokenizer @property def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : Union[str, Any] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) __a : List[Any] = MultilingualCLIP(__a ) __a : int = text_encoder.eval() return text_encoder @property def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : Optional[Any] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'text_image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'text_image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __a : str = UNetaDConditionModel(**__a ) return model @property def __UpperCAmelCase ( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCAmelCase ( self ): '''simple docstring''' torch.manual_seed(0 ) __a : List[Any] = 
VQModel(**self.dummy_movq_kwargs ) return model def __UpperCAmelCase ( self ): '''simple docstring''' __a : Tuple = self.dummy_text_encoder __a : List[str] = self.dummy_tokenizer __a : Optional[Any] = self.dummy_unet __a : str = self.dummy_movq __a : str = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type='epsilon' , thresholding=__a , ) __a : Optional[int] = { 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def __UpperCAmelCase ( self , __a , __a=0 ): '''simple docstring''' __a : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__a ) ).to(__a ) __a : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__a ) # create init_image __a : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a ) __a : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __a : str = Image.fromarray(np.uinta(__a ) ).convert('RGB' ).resize((256, 256) ) # create mask __a : Tuple = np.ones((64, 64) , dtype=np.floataa ) __a : Any = 0 if str(__a ).startswith('mps' ): __a : str = torch.manual_seed(__a ) else: __a : str = torch.Generator(device=__a ).manual_seed(__a ) __a : List[Any] = { 'prompt': 'horse', 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[Any] = 'cpu' __a : List[Any] = self.get_dummy_components() __a : str = self.pipeline_class(**__a ) __a : List[str] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __a : Tuple = pipe(**self.get_dummy_inputs(__a ) ) __a : Any = output.images __a : Optional[int] = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __a : int = image[0, -3:, -3:, -1] __a : Tuple = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) __a : Union[str, Any] = np.array( [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCAmelCase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __UpperCAmelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): '''simple docstring''' __a : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' ) __a : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __a : Union[str, Any] = np.ones((768, 768) , dtype=np.floataa ) __a : List[str] = 0 __a : Dict = 'a hat' __a : Any = KandinskyPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-prior' , 
torch_dtype=torch.floataa ) pipe_prior.to(__a ) __a : List[str] = KandinskyInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa ) __a : Optional[Any] = pipeline.to(__a ) pipeline.set_progress_bar_config(disable=__a ) __a : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) __a , __a : Optional[Any] = pipe_prior( __a , generator=__a , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __a : Tuple = pipeline( __a , image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , ) __a : Dict = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__a , __a )
294
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
294
1
"""simple docstring""" lowerCamelCase_ : List[str] = tuple[float, float, float] lowerCamelCase_ : Optional[int] = tuple[float, float, float] def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" A_ : List[Any] = end_pointa[0] - end_pointa[0] A_ : Dict = end_pointa[1] - end_pointa[1] A_ : List[Any] = end_pointa[2] - end_pointa[2] return (x, y, z) def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" A_ : List[str] = ab[1] * ac[2] - ab[2] * ac[1] # *i A_ : int = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j A_ : Optional[Any] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" return tuple(round(_UpperCAmelCase , _UpperCAmelCase ) for x in vector ) == (0, 0, 0) def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 10 ): """simple docstring""" A_ : Optional[int] = create_vector(_UpperCAmelCase , _UpperCAmelCase ) A_ : Dict = create_vector(_UpperCAmelCase , _UpperCAmelCase ) return is_zero_vector(get_ad_vectors_cross(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase )
286
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch lowerCamelCase_ : str = logging.get_logger(__name__) @add_end_docstrings( UpperCAmelCase__ , r""" top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). """ , ) class _UpperCAmelCase ( UpperCAmelCase__ ): '''simple docstring''' def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" if self.framework == "tf": A_ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": A_ : List[str] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ) else: raise ValueError('Unsupported framework' ) return masked_index def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" A_ : List[str] = self.get_masked_index(snake_case_ ) A_ : str = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , ) def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" if isinstance(snake_case_ , snake_case_ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input['input_ids'][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(snake_case_ ) def lowerCamelCase_ ( self , snake_case_ , snake_case_=None , **snake_case_ ): """simple docstring""" if return_tensors is None: A_ : Any = self.framework A_ : Dict = self.tokenizer(snake_case_ , return_tensors=snake_case_ ) self.ensure_exactly_one_mask_token(snake_case_ ) return model_inputs def lowerCamelCase_ ( self , snake_case_ ): """simple docstring""" A_ : Dict = self.model(**snake_case_ ) A_ : Optional[int] = model_inputs['input_ids'] return model_outputs def lowerCamelCase_ ( self , snake_case_ , snake_case_=5 , snake_case_=None ): """simple docstring""" if target_ids is not None and target_ids.shape[0] < top_k: A_ : str = target_ids.shape[0] A_ : Optional[Any] = model_outputs['input_ids'][0] A_ : List[Any] = model_outputs['logits'] if self.framework == "tf": A_ : List[str] = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] A_ : Union[str, Any] = outputs.numpy() A_ : Optional[int] = outputs[0, masked_index, :] A_ : Optional[Any] = stable_softmax(snake_case_ , axis=-1 ) if target_ids is not None: A_ : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case_ , 0 ) , target_ids.reshape(-1 , 1 ) ) A_ : Optional[int] = tf.expand_dims(snake_case_ , 0 ) A_ : Any = tf.math.top_k(snake_case_ , k=snake_case_ ) A_ , A_ : str = topk.values.numpy(), topk.indices.numpy() else: A_ : int = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case_ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample A_ : Tuple = outputs[0, masked_index, :] A_ : List[str] = logits.softmax(dim=-1 ) if target_ids is not None: A_ : str = probs[..., target_ids] A_ , A_ : List[str] = 
probs.topk(snake_case_ ) A_ : List[Any] = [] A_ : int = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): A_ : str = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place A_ : Union[str, Any] = input_ids.numpy().copy() if target_ids is not None: A_ : str = target_ids[p].tolist() A_ : Union[str, Any] = p # Filter padding out: A_ : Any = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back A_ : Any = self.tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ ) A_ : Any = {'score': v, 'token': p, 'token_str': self.tokenizer.decode([p] ), 'sequence': sequence} row.append(snake_case_ ) result.append(snake_case_ ) if single_mask: return result[0] return result def lowerCamelCase_ ( self , snake_case_ , snake_case_=None ): """simple docstring""" if isinstance(snake_case_ , snake_case_ ): A_ : List[str] = [targets] try: A_ : Optional[int] = self.tokenizer.get_vocab() except Exception: A_ : int = {} A_ : Tuple = [] for target in targets: A_ : int = vocab.get(snake_case_ , snake_case_ ) if id_ is None: A_ : Tuple = self.tokenizer( snake_case_ , add_special_tokens=snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , max_length=1 , truncation=snake_case_ , )['input_ids'] if len(snake_case_ ) == 0: logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ 'We cannot replace it with anything meaningful, ignoring it' ) continue A_ : str = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( F"""The specified target token `{target}` does not exist in the model vocabulary. """ F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" ) target_ids.append(id_ ) A_ : Tuple = list(set(snake_case_ ) ) if len(snake_case_ ) == 0: raise ValueError('At least one target must be provided when passed.' ) A_ : Optional[Any] = np.array(snake_case_ ) return target_ids def lowerCamelCase_ ( self , snake_case_=None , snake_case_=None ): """simple docstring""" A_ : List[str] = {} if targets is not None: A_ : Any = self.get_target_ids(snake_case_ , snake_case_ ) A_ : Optional[Any] = target_ids if top_k is not None: A_ : int = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( 'fill-mask' , self.model.base_model_prefix , 'The tokenizer does not define a `mask_token`.' ) return {}, {}, postprocess_params def __call__( self , snake_case_ , *snake_case_ , **snake_case_ ): """simple docstring""" A_ : List[str] = super().__call__(snake_case_ , **snake_case_ ) if isinstance(snake_case_ , snake_case_ ) and len(snake_case_ ) == 1: return outputs[0] return outputs
286
1
'''simple docstring''' import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa _SCREAMING_SNAKE_CASE = logging.getLogger(__name__) class __lowercase ( lowerCAmelCase__ ): '''simple docstring''' a : Union[str, Any] = "summarization" a : List[str] = ["loss"] a : Union[str, Any] = ROUGE_KEYS a : str = "rouge2" def __init__(self ,_lowerCamelCase ,**_lowerCamelCase ) -> int: '''simple docstring''' if hparams.sortish_sampler and hparams.gpus > 1: __lowercase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''' ) if hparams.sortish_sampler: raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''' ) super().__init__(_lowerCamelCase ,num_labels=_lowerCamelCase ,mode=self.mode ,**_lowerCamelCase ) use_task_specific_params(self.model ,'''summarization''' ) save_git_info(self.hparams.output_dir ) __lowercase = Path(self.output_dir ) / '''metrics.json''' __lowercase = Path(self.output_dir ) / '''hparams.pkl''' pickle_save(self.hparams ,self.hparams_save_path ) __lowercase = 0 __lowercase = defaultdict(_lowerCamelCase ) __lowercase = self.config.model_type __lowercase = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size __lowercase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } __lowercase = { '''train''': self.hparams.n_train, '''val''': self.hparams.n_val, '''test''': self.hparams.n_test, } __lowercase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} __lowercase = { '''train''': self.hparams.max_target_length, '''val''': self.hparams.val_max_target_length, '''test''': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) __lowercase = get_git_info()['''repo_sha'''] __lowercase = hparams.num_workers __lowercase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer ,_lowerCamelCase ): __lowercase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] __lowercase = self.decoder_start_token_id __lowercase = ( SeqaSeqDataset if hasattr(self.tokenizer ,'''prepare_seq2seq_batch''' ) 
else LegacySeqaSeqDataset ) __lowercase = False __lowercase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: __lowercase = self.hparams.eval_max_gen_length else: __lowercase = self.model.config.max_length __lowercase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict[str, List[str]]: '''simple docstring''' __lowercase = { k: self.tokenizer.batch_decode(v.tolist() ) if '''mask''' not in k else v.shape for k, v in batch.items() } save_json(_lowerCamelCase ,Path(self.output_dir ) / '''text_batch.json''' ) save_json({k: v.tolist() for k, v in batch.items()} ,Path(self.output_dir ) / '''tok_batch.json''' ) __lowercase = True return readable_batch def _UpperCAmelCase (self ,_lowerCamelCase ,**_lowerCamelCase ) -> List[str]: '''simple docstring''' return self.model(_lowerCamelCase ,**_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict: '''simple docstring''' __lowercase = self.tokenizer.batch_decode( _lowerCamelCase ,skip_special_tokens=_lowerCamelCase ,clean_up_tokenization_spaces=_lowerCamelCase ) return lmap(str.strip ,_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ) -> Tuple: '''simple docstring''' __lowercase = self.tokenizer.pad_token_id __lowercase , __lowercase = batch['''input_ids'''], batch['''attention_mask'''] __lowercase = batch['''labels'''] if isinstance(self.model ,_lowerCamelCase ): __lowercase = self.model._shift_right(_lowerCamelCase ) else: __lowercase = shift_tokens_right(_lowerCamelCase ,_lowerCamelCase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero __lowercase = decoder_input_ids self.save_readable_batch(_lowerCamelCase ) __lowercase = self(_lowerCamelCase ,attention_mask=_lowerCamelCase ,decoder_input_ids=_lowerCamelCase ,use_cache=_lowerCamelCase ) __lowercase = outputs['''logits'''] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id __lowercase = nn.CrossEntropyLoss(ignore_index=_lowerCamelCase ) assert lm_logits.shape[-1] == self.vocab_size __lowercase = ce_loss_fct(lm_logits.view(-1 ,lm_logits.shape[-1] ) ,tgt_ids.view(-1 ) ) else: __lowercase = nn.functional.log_softmax(_lowerCamelCase ,dim=-1 ) __lowercase , __lowercase = label_smoothed_nll_loss( _lowerCamelCase ,_lowerCamelCase ,self.hparams.label_smoothing ,ignore_index=_lowerCamelCase ) return (loss,) @property def _UpperCAmelCase (self ) -> int: '''simple docstring''' return self.tokenizer.pad_token_id def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict: '''simple docstring''' __lowercase = self._step(_lowerCamelCase ) __lowercase = dict(zip(self.loss_names ,_lowerCamelCase ) ) # tokens per batch __lowercase = batch['''input_ids'''].ne(self.pad ).sum() + batch['''labels'''].ne(self.pad ).sum() __lowercase = batch['''input_ids'''].shape[0] __lowercase = batch['''input_ids'''].eq(self.pad ).sum() __lowercase = batch['''input_ids'''].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict: '''simple docstring''' return self._generative_step(_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase="val" ) -> Dict: '''simple docstring''' self.step_count += 1 __lowercase = {k: torch.stack([x[k] for x in outputs] ).mean() for 
k in self.loss_names} __lowercase = losses['''loss'''] __lowercase = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['''gen_time''', '''gen_len'''] } __lowercase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) __lowercase = torch.tensor(_lowerCamelCase ).type_as(_lowerCamelCase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(_lowerCamelCase ) __lowercase = {f"{prefix}_avg_{k}": x for k, x in losses.items()} __lowercase = self.step_count self.metrics[prefix].append(_lowerCamelCase ) # callback writes this to self.metrics_save_path __lowercase = flatten_list([x['''preds'''] for x in outputs] ) return { "log": all_metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metric_tensor, } def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Dict: '''simple docstring''' return calculate_rouge(_lowerCamelCase ,_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ) -> dict: '''simple docstring''' __lowercase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') __lowercase = self.model.generate( batch['''input_ids'''] ,attention_mask=batch['''attention_mask'''] ,use_cache=_lowerCamelCase ,decoder_start_token_id=self.decoder_start_token_id ,num_beams=self.eval_beams ,max_length=self.eval_max_length ,) __lowercase = (time.time() - ta) / batch['''input_ids'''].shape[0] __lowercase = self.ids_to_clean_text(_lowerCamelCase ) __lowercase = self.ids_to_clean_text(batch['''labels'''] ) __lowercase = self._step(_lowerCamelCase ) __lowercase = dict(zip(self.loss_names ,_lowerCamelCase ) ) __lowercase = self.calc_generative_metrics(_lowerCamelCase ,_lowerCamelCase ) __lowercase = np.mean(lmap(_lowerCamelCase ,_lowerCamelCase ) ) base_metrics.update(gen_time=_lowerCamelCase ,gen_len=_lowerCamelCase ,preds=_lowerCamelCase ,target=_lowerCamelCase ,**_lowerCamelCase ) return base_metrics def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]: '''simple docstring''' return self._generative_step(_lowerCamelCase ) def _UpperCAmelCase (self ,_lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' return self.validation_epoch_end(_lowerCamelCase ,prefix='''test''' ) def _UpperCAmelCase (self ,_lowerCamelCase ) -> SeqaSeqDataset: '''simple docstring''' __lowercase = self.n_obs[type_path] __lowercase = self.target_lens[type_path] __lowercase = self.dataset_class( self.tokenizer ,type_path=_lowerCamelCase ,n_obs=_lowerCamelCase ,max_target_length=_lowerCamelCase ,**self.dataset_kwargs ,) return dataset def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase = False ) -> DataLoader: '''simple docstring''' __lowercase = self.get_dataset(_lowerCamelCase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": __lowercase = dataset.make_sortish_sampler(_lowerCamelCase ,distributed=self.hparams.gpus > 1 ) return DataLoader( _lowerCamelCase ,batch_size=_lowerCamelCase ,collate_fn=dataset.collate_fn ,shuffle=_lowerCamelCase ,num_workers=self.num_workers ,sampler=_lowerCamelCase ,) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": __lowercase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch ,distributed=self.hparams.gpus > 1 ) return DataLoader( _lowerCamelCase ,batch_sampler=_lowerCamelCase ,collate_fn=dataset.collate_fn ,num_workers=self.num_workers 
,) else: return DataLoader( _lowerCamelCase ,batch_size=_lowerCamelCase ,collate_fn=dataset.collate_fn ,shuffle=_lowerCamelCase ,num_workers=self.num_workers ,sampler=_lowerCamelCase ,) def _UpperCAmelCase (self ) -> DataLoader: '''simple docstring''' __lowercase = self.get_dataloader('''train''' ,batch_size=self.hparams.train_batch_size ,shuffle=_lowerCamelCase ) return dataloader def _UpperCAmelCase (self ) -> DataLoader: '''simple docstring''' return self.get_dataloader('''val''' ,batch_size=self.hparams.eval_batch_size ) def _UpperCAmelCase (self ) -> DataLoader: '''simple docstring''' return self.get_dataloader('''test''' ,batch_size=self.hparams.eval_batch_size ) @staticmethod def _UpperCAmelCase (_lowerCamelCase ,_lowerCamelCase ) -> int: '''simple docstring''' BaseTransformer.add_model_specific_args(_lowerCamelCase ,_lowerCamelCase ) add_generic_args(_lowerCamelCase ,_lowerCamelCase ) parser.add_argument( '''--max_source_length''' ,default=1024 ,type=_lowerCamelCase ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--max_target_length''' ,default=56 ,type=_lowerCamelCase ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--val_max_target_length''' ,default=142 ,type=_lowerCamelCase ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument( '''--test_max_target_length''' ,default=142 ,type=_lowerCamelCase ,help=( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) ,) parser.add_argument('''--freeze_encoder''' ,action='''store_true''' ) parser.add_argument('''--freeze_embeds''' ,action='''store_true''' ) parser.add_argument('''--sortish_sampler''' ,action='''store_true''' ,default=_lowerCamelCase ) parser.add_argument('''--overwrite_output_dir''' ,action='''store_true''' ,default=_lowerCamelCase ) parser.add_argument('''--max_tokens_per_batch''' ,type=_lowerCamelCase ,default=_lowerCamelCase ) parser.add_argument('''--logger_name''' ,type=_lowerCamelCase ,choices=['''default''', '''wandb''', '''wandb_shared'''] ,default='''default''' ) parser.add_argument('''--n_train''' ,type=_lowerCamelCase ,default=-1 ,required=_lowerCamelCase ,help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_val''' ,type=_lowerCamelCase ,default=500 ,required=_lowerCamelCase ,help='''# examples. -1 means use all.''' ) parser.add_argument('''--n_test''' ,type=_lowerCamelCase ,default=-1 ,required=_lowerCamelCase ,help='''# examples. -1 means use all.''' ) parser.add_argument( '''--task''' ,type=_lowerCamelCase ,default='''summarization''' ,required=_lowerCamelCase ,help='''# examples. 
-1 means use all.''' ) parser.add_argument('''--label_smoothing''' ,type=_lowerCamelCase ,default=0.0 ,required=_lowerCamelCase ) parser.add_argument('''--src_lang''' ,type=_lowerCamelCase ,default='''''' ,required=_lowerCamelCase ) parser.add_argument('''--tgt_lang''' ,type=_lowerCamelCase ,default='''''' ,required=_lowerCamelCase ) parser.add_argument('''--eval_beams''' ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase ) parser.add_argument( '''--val_metric''' ,type=_lowerCamelCase ,default=_lowerCamelCase ,required=_lowerCamelCase ,choices=['''bleu''', '''rouge2''', '''loss''', None] ) parser.add_argument('''--eval_max_gen_length''' ,type=_lowerCamelCase ,default=_lowerCamelCase ,help='''never generate more than n tokens''' ) parser.add_argument('''--save_top_k''' ,type=_lowerCamelCase ,default=1 ,required=_lowerCamelCase ,help='''How many checkpoints to save''' ) parser.add_argument( '''--early_stopping_patience''' ,type=_lowerCamelCase ,default=-1 ,required=_lowerCamelCase ,help=( '''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So''' ''' val_check_interval will effect it.''' ) ,) return parser class __lowercase ( lowerCAmelCase__ ): '''simple docstring''' a : Any = "translation" a : Union[str, Any] = ["loss"] a : Optional[int] = ["bleu"] a : Optional[Any] = "bleu" def __init__(self ,_lowerCamelCase ,**_lowerCamelCase ) -> List[str]: '''simple docstring''' super().__init__(_lowerCamelCase ,**_lowerCamelCase ) __lowercase = hparams.src_lang __lowercase = hparams.tgt_lang def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> dict: '''simple docstring''' return calculate_bleu(_lowerCamelCase ,_lowerCamelCase ) def _lowerCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str]=None ): Path(args.output_dir ).mkdir(exist_ok=lowerCamelCase_ ) check_output_dir(lowerCamelCase_ , expected_items=3 ) if model is None: if "summarization" in args.task: __lowercase = SummarizationModule(lowerCamelCase_ ) else: __lowercase = TranslationModule(lowerCamelCase_ ) __lowercase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('''/tmp''' ) or str(args.output_dir ).startswith('''/var''' ) ): __lowercase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger __lowercase = os.environ.get('''WANDB_PROJECT''' , lowerCamelCase_ ) __lowercase = WandbLogger(name=model.output_dir.name , project=lowerCamelCase_ ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger __lowercase = WandbLogger(name=model.output_dir.name , project=f"hf_{dataset}" ) if args.early_stopping_patience >= 0: __lowercase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: __lowercase = False __lowercase = args.val_metric == '''loss''' __lowercase = generic_train( lowerCamelCase_ , lowerCamelCase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , lowerCamelCase_ ) , early_stopping_callback=lowerCamelCase_ , logger=lowerCamelCase_ , ) pickle_save(model.hparams , model.output_dir / '''hparams.pkl''' ) if not args.do_predict: return model __lowercase = '''''' __lowercase = sorted(glob.glob(os.path.join(args.output_dir , '''*.ckpt''' ) , recursive=lowerCamelCase_ ) ) if checkpoints: __lowercase = checkpoints[-1] __lowercase = checkpoints[-1] 
trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() _SCREAMING_SNAKE_CASE = pl.Trainer.add_argparse_args(parser) _SCREAMING_SNAKE_CASE = SummarizationModule.add_model_specific_args(parser, os.getcwd()) _SCREAMING_SNAKE_CASE = parser.parse_args() main(args)
217
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
217
1
import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
73
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
4
0
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase = logging.get_logger(__name__) lowerCAmelCase = { '''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''', # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr } class A ( A_ ): UpperCamelCase_ : Dict ='''deformable_detr''' UpperCamelCase_ : Optional[Any] ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__(self , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=3 , lowerCAmelCase=3_0_0 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=6 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=8 , lowerCAmelCase=6 , lowerCAmelCase=1_0_2_4 , lowerCAmelCase=8 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase="relu" , lowerCAmelCase=2_5_6 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=0.02 , lowerCAmelCase=1.0 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase="sine" , lowerCAmelCase="resnet50" , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=False , lowerCAmelCase=3_0_0 , lowerCAmelCase=False , lowerCAmelCase=1 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=1 , lowerCAmelCase=5 , lowerCAmelCase=2 , lowerCAmelCase=0.1 , lowerCAmelCase=0.25 , lowerCAmelCase=False , **lowerCAmelCase , ): if backbone_config is not None and use_timm_backbone: raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' ) if not use_timm_backbone: if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' ) __lowercase= CONFIG_MAPPING['resnet'](out_features=['stage4'] ) elif isinstance(lowerCAmelCase , lowerCAmelCase ): __lowercase= backbone_config.get('model_type' ) __lowercase= CONFIG_MAPPING[backbone_model_type] __lowercase= config_class.from_dict(lowerCAmelCase ) __lowercase= use_timm_backbone __lowercase= backbone_config __lowercase= num_channels __lowercase= num_queries __lowercase= max_position_embeddings __lowercase= d_model __lowercase= encoder_ffn_dim __lowercase= encoder_layers __lowercase= encoder_attention_heads __lowercase= decoder_ffn_dim __lowercase= decoder_layers __lowercase= decoder_attention_heads __lowercase= dropout __lowercase= attention_dropout __lowercase= activation_dropout __lowercase= activation_function __lowercase= init_std __lowercase= init_xavier_std __lowercase= encoder_layerdrop __lowercase= auxiliary_loss __lowercase= position_embedding_type __lowercase= backbone __lowercase= use_pretrained_backbone __lowercase= dilation # deformable attributes __lowercase= num_feature_levels __lowercase= encoder_n_points __lowercase= decoder_n_points __lowercase= two_stage __lowercase= two_stage_num_proposals __lowercase= with_box_refine if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.' 
) # Hungarian matcher __lowercase= class_cost __lowercase= bbox_cost __lowercase= giou_cost # Loss coefficients __lowercase= mask_loss_coefficient __lowercase= dice_loss_coefficient __lowercase= bbox_loss_coefficient __lowercase= giou_loss_coefficient __lowercase= eos_coefficient __lowercase= focal_alpha __lowercase= disable_custom_kernels super().__init__(is_encoder_decoder=lowerCAmelCase , **lowerCAmelCase ) @property def _A (self ): return self.encoder_attention_heads @property def _A (self ): return self.d_model def _A (self ): __lowercase= copy.deepcopy(self.__dict__ ) if self.backbone_config is not None: __lowercase= self.backbone_config.to_dict() __lowercase= self.__class__.model_type return output
369
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def _lowerCamelCase( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , ) -> int: '''simple docstring''' __lowercase= {} if train_file is not None: __lowercase= [train_file] if eval_file is not None: __lowercase= [eval_file] if test_file is not None: __lowercase= [test_file] __lowercase= datasets.load_dataset('csv' , data_files=lowercase__ ) __lowercase= list(ds[list(files.keys() )[0]].features.keys() ) __lowercase= features_name.pop(lowercase__ ) __lowercase= list(set(ds[list(files.keys() )[0]][label_name] ) ) __lowercase= {label: i for i, label in enumerate(lowercase__ )} __lowercase= tokenizer.model_input_names __lowercase= {} if len(lowercase__ ) == 1: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( example[features_name[0]] , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' ) , batched=lowercase__ , ) elif len(lowercase__ ) == 2: for k in files.keys(): __lowercase= ds[k].map( lambda lowercase__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]) , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' , ) , batched=lowercase__ , ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: __lowercase= {k: v for k, v in ex.items() if k in input_names} __lowercase= labelaid[ex[label_name]] yield (d, label) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: __lowercase= train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: __lowercase= val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) __lowercase= ( tf.data.Dataset.from_generator( lowercase__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: __lowercase= test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid lowerCAmelCase = logging.getLogger(__name__) @dataclass class A : UpperCamelCase_ : int =field(metadata={'''help''': '''Which column contains 
the label'''} ) UpperCamelCase_ : str =field(default=A_ , metadata={'''help''': '''The path of the training file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the development file'''} ) UpperCamelCase_ : Optional[str] =field(default=A_ , metadata={'''help''': '''The path of the test file'''} ) UpperCamelCase_ : int =field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) UpperCamelCase_ : bool =field( default=A_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) @dataclass class A : UpperCamelCase_ : str =field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase_ : bool =field(default=A_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. UpperCamelCase_ : Optional[str] =field( default=A_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) def _lowerCamelCase( ) -> Optional[Any]: '''simple docstring''' __lowercase= HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) __lowercase, __lowercase, __lowercase= parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.info( F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, ' F'16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowercase= AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) __lowercase, __lowercase, __lowercase, __lowercase= get_tfds( train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , ) __lowercase= AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase__ ) , labelaid=lowercase__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='text-classification' , cache_dir=model_args.cache_dir , ) with training_args.strategy.scope(): __lowercase= TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path , from_pt=bool('.bin' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , ) def compute_metrics(lowercase__ ) -> Dict: __lowercase= np.argmax(p.predictions , axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer __lowercase= TFTrainer( model=lowercase__ , args=lowercase__ , train_dataset=lowercase__ , eval_dataset=lowercase__ , compute_metrics=lowercase__ , ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase= {} if training_args.do_eval: logger.info('*** Evaluate ***' ) __lowercase= trainer.evaluate() __lowercase= os.path.join(training_args.output_dir , 'eval_results.txt' ) with open(lowercase__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) results.update(lowercase__ ) return results if __name__ == "__main__": main()
304
0
import comet  # From: unbabel-comet
import torch

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@inproceedings{rei-EtAl:2020:WMT,
  author    = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
  title     = {Unbabel's Participation in the WMT20 Metrics Shared Task},
  booktitle = {Proceedings of the Fifth Conference on Machine Translation},
  month     = {November},
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  pages     = {909--918},
}
@inproceedings{rei-etal-2020-comet,
    title = "{COMET}: A Neural Framework for {MT} Evaluation",
    author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
    pages = "2685--2702",
}
"""

_DESCRIPTION = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train
Machine Translation metrics that achieve high levels of correlation with different types of human judgments
(HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were
used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.

See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""

_KWARGS_DESCRIPTION = """
COMET score.

Args:

`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.

Returns:
    `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
    `scores`: List of scores.

Examples:

    >>> comet_metric = datasets.load_metric('comet')
    >>> # comet_metric = load_metric('comet', 'wmt20-comet-da')  # you can also choose which model to use
    >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
    >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
    >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
    >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
    >>> print([round(v, 2) for v in results["scores"]])
    [0.19, 0.92]
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
82
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
94
0
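A minimal sketch of the fairseq/SentencePiece id alignment implemented in the tokenizer above; the `spm_piece_to_id` dict is a toy stand-in for a real SentencePiece model and is not part of the source file.

# Toy stand-in for sp_model.PieceToId, following the alignment table in __init__.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1
spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}

def to_fairseq_id(token: str) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 is <unk>, so unknown pieces fall back to the fairseq unk id (3)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert to_fairseq_id(",") == 4   # spm id 3 + offset 1
assert to_fairseq_id("xyz") == 3  # unknown piece maps to <unk>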
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
368
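A quick sanity check for the row above, using sorted characters as an independent oracle (the helper name is illustrative, not from the source file).

def is_anagram_oracle(a: str, b: str) -> bool:
    # Same multiset of characters, whitespace ignored, case-insensitive.
    return sorted(a.lower().replace(" ", "")) == sorted(b.lower().replace(" ", ""))

assert is_anagram_oracle("Silent", "Listen")
assert not is_anagram_oracle("cat", "dog")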
import os
from pathlib import Path

import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader

from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset


BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir


class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]

            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated

            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)

        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
90
0
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
67
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
67
1
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below n."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
362
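An explicit-loop cross-check of the one-liner above (the helper name is made up for illustration); for n = 10 the multiples of 3 or 5 below 10 are 3, 5, 6, 9, summing to 23.

def solution_naive(n: int = 1000) -> int:
    total = 0
    for e in range(3, n):
        if e % 3 == 0 or e % 5 == 0:
            total += e
    return total

assert solution_naive(10) == 23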
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
274
0
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
94
from __future__ import annotations

import inspect
import unittest

from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTForImageClassification, TFViTModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
244
0
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = [
    "python",
    "tqdm",
    "regex",
    "requests",
    "packaging",
    "filelock",
    "numpy",
    "tokenizers",
    "huggingface-hub",
    "safetensors",
    "accelerate",
    "pyyaml",
]

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available

            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed

        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
88
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between
    0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0

    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
88
1
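A standalone sketch of the normalization path inside `_compute` above, using only numpy and the standard library; the toy inputs are illustrative and not from the metric's test suite.

import string
import numpy as np

preds = np.asarray(["Cat?", "theater"])
refs = np.asarray(["cat", "theater"])

# ignore_case + ignore_punctuation, mirroring _compute
preds = np.char.lower(preds)
refs = np.char.lower(refs)
table = string.punctuation.maketrans("", "", string.punctuation)
preds = np.char.translate(preds, table=table)
refs = np.char.translate(refs, table=table)

print({"exact_match": float(np.mean(preds == refs) * 100)})  # 100.0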
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union

from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream


if TYPE_CHECKING:
    import sqlite3

    import sqlalchemy


class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset


class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_batch in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_batch

        return written
48
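A hedged end-to-end sketch of the writer path above via the public `Dataset.to_sql` API; the in-memory sqlite3 database and the `toy_table` name are assumptions for illustration only.

import sqlite3
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
con = sqlite3.connect(":memory:")
ds.to_sql("toy_table", con)  # batches -> pandas DataFrame -> DataFrame.to_sql
count = con.execute("SELECT COUNT(*) FROM toy_table").fetchone()[0]
assert count == len(ds)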
"""simple docstring""" import baseaa def _A ( UpperCamelCase_ : str) -> bytes: '''simple docstring''' return baseaa.baaencode(string.encode("utf-8")) def _A ( UpperCamelCase_ : bytes) -> str: '''simple docstring''' return baseaa.baadecode(UpperCamelCase_).decode("utf-8") if __name__ == "__main__": _a = 'Hello World!' _a = baseaa_encode(test) print(encoded) _a = baseaa_decode(encoded) print(decoded)
17
0
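A round-trip check for the two helpers above, exercising the underlying standard-library calls they wrap.

import base64

message = "Hello World!"
encoded = base64.b64encode(message.encode("utf-8"))  # what base64_encode returns
assert encoded == b"SGVsbG8gV29ybGQh"
assert base64.b64decode(encoded).decode("utf-8") == message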
def solution(pence: int = 200) -> int:
    """Count the number of ways to make `pence` pence using standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
367
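A hand-checkable worked example of the same table-filling recurrence used by `solution` above, restricted to the coin set {1, 2, 5}; the helper name is illustrative.

def ways(pence, coins):
    table = [1] + [0] * pence  # one way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            table[i] += table[i - coin]
    return table[pence]

# 5 = 5 | 2+2+1 | 2+1+1+1 | 1+1+1+1+1  -> 4 ways
assert ways(5, [1, 2, 5]) == 4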
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            input_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
37
0
def mf_knapsack(i, wt, val, j):
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
198
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase ( a__ ): '''simple docstring''' def _lowerCAmelCase( self ) -> List[str]: lowercase__ : Union[str, Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__lowerCAmelCase , '''width_multiplier''' ) ) class UpperCAmelCase : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=64 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase="swish" , __lowerCAmelCase=3 , __lowerCAmelCase=32 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=10 , __lowerCAmelCase=None , __lowerCAmelCase=0.2_5 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , ) -> List[Any]: lowercase__ : List[str] = parent lowercase__ : List[Any] = batch_size lowercase__ : List[str] = image_size lowercase__ : Optional[int] = patch_size lowercase__ : Tuple = num_channels lowercase__ : List[str] = make_divisible(512 * width_multiplier , divisor=8 ) lowercase__ : Optional[int] = hidden_act lowercase__ : List[Any] = conv_kernel_size lowercase__ : Dict = output_stride lowercase__ : List[Any] = classifier_dropout_prob lowercase__ : str = use_labels lowercase__ : List[Any] = is_training lowercase__ : Tuple = num_labels lowercase__ : Optional[int] = initializer_range lowercase__ : Tuple = scope lowercase__ : List[Any] = width_multiplier lowercase__ : Optional[int] = ffn_dropout lowercase__ : int = attn_dropout def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Any = None lowercase__ : Tuple = None if self.use_labels: lowercase__ : str = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels, pixel_labels def _lowerCAmelCase( self ) -> Tuple: return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: lowercase__ : Optional[int] = MobileViTVaModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : str = model(__lowerCAmelCase ) 
self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]: lowercase__ : Optional[Any] = self.num_labels lowercase__ : Dict = MobileViTVaForImageClassification(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : Optional[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: lowercase__ : str = self.num_labels lowercase__ : List[Any] = MobileViTVaForSemanticSegmentation(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowercase__ : int = model(__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowercase__ : Union[str, Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _lowerCAmelCase( self ) -> int: lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs lowercase__ : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase ( a__ , a__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = ( { "feature-extraction": MobileViTVaModel, "image-classification": MobileViTVaForImageClassification, "image-segmentation": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False def _lowerCAmelCase( self ) -> int: lowercase__ : Tuple = MobileViTVaModelTester(self ) lowercase__ : Any = MobileViTVaConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def _lowerCAmelCase( self ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' ) def _lowerCAmelCase( self ) -> str: pass @unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' ) def _lowerCAmelCase( self ) -> Optional[Any]: pass @unittest.skip(reason='''MobileViTV2 does not output attentions''' ) def _lowerCAmelCase( self ) -> Optional[int]: pass @require_torch_multi_gpu @unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' ) def _lowerCAmelCase( self ) -> Any: pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowerCAmelCase( self ) -> str: pass def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = model_class(__lowerCAmelCase ) lowercase__ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is 
deterministic lowercase__ : str = [*signature.parameters.keys()] lowercase__ : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def _lowerCAmelCase( self ) -> str: lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowerCAmelCase( self ) -> Union[str, Any]: def check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): lowercase__ : Optional[int] = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) ) lowercase__ : List[Any] = outputs.hidden_states lowercase__ : Optional[int] = 5 self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowercase__ : str = 2 for i in range(len(__lowerCAmelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Tuple = True check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) def _lowerCAmelCase( self ) -> List[Any]: lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase ) def _lowerCAmelCase( self ) -> Dict: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__lowerCAmelCase ) @slow def _lowerCAmelCase( self ) -> List[str]: for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Dict = MobileViTVaModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) def __UpperCamelCase ( ): lowercase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def _lowerCAmelCase( self ) -> int: return ( MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ) if is_vision_available() else None ) @slow def _lowerCAmelCase( self ) -> List[Any]: lowercase__ : Dict = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to( __lowerCAmelCase ) lowercase__ : List[Any] = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : int = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase__ : List[str] = model(**__lowerCAmelCase ) # verify the logits lowercase__ : Optional[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __lowerCAmelCase ) lowercase__ : int = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , 
atol=1E-4 ) ) @slow def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : int = model.to(__lowerCAmelCase ) lowercase__ : Any = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : str = prepare_img() lowercase__ : Optional[int] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase__ : str = model(**__lowerCAmelCase ) lowercase__ : Tuple = outputs.logits # verify the logits lowercase__ : List[Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __lowerCAmelCase ) lowercase__ : Union[str, Any] = torch.tensor( [ [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]], [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]], [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]], ] , device=__lowerCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __lowerCAmelCase , atol=1E-4 ) ) @slow def _lowerCAmelCase( self ) -> Any: lowercase__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : List[str] = model.to(__lowerCAmelCase ) lowercase__ : Optional[int] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' ) lowercase__ : int = prepare_img() lowercase__ : List[str] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' ).to(__lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase__ : Optional[Any] = model(**__lowerCAmelCase ) lowercase__ : Optional[int] = outputs.logits.detach().cpu() lowercase__ : Any = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase , target_sizes=[(50, 60)] ) lowercase__ : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __lowerCAmelCase ) lowercase__ : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__lowerCAmelCase ) lowercase__ : Union[str, Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __lowerCAmelCase )
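For reference, a minimal sketch of the channel-rounding helper imported above; the function name and the 0.9 floor are assumptions based on common MobileNet-style implementations, not copied from the library:

# Hypothetical re-implementation, for illustration only.
def make_divisible_sketch(value: float, divisor: int = 8, min_value: int = None) -> int:
    # Round `value` to the nearest multiple of `divisor`, never below `min_value`.
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:  # assumed guard: never shrink by more than ~10%
        new_value += divisor
    return new_value

assert make_divisible_sketch(512 * 0.25) == 128  # width_multiplier=0.25 gives 128 channels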
198
1
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 lowercase_ = get_tests_dir("""fixtures""") class _snake_case ( unittest.TestCase): def A__ ( self : Tuple ): # A mock response for an HTTP head request to emulate server down lowercase__ = mock.Mock() lowercase__ = 500 lowercase__ = {} lowercase__ = HTTPError lowercase__ = {} # Download this model to make sure it's in the cache. lowercase__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request", return_value=__lowercase ) as mock_head: lowercase__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def A__ ( self : Dict ): # This test is for deprecated behavior and can be removed in v5 lowercase__ = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def A__ ( self : List[str] ): with self.assertRaises(__lowercase ): # config is in subfolder, the following should not work without specifying the subfolder lowercase__ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) lowercase__ = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor" ) self.assertIsNotNone(__lowercase ) @is_staging_test class _snake_case ( unittest.TestCase): @classmethod def A__ ( cls : int ): lowercase__ = TOKEN HfFolder.save_token(__lowercase ) @classmethod def A__ ( cls : Optional[int] ): try: delete_repo(token=cls._token, repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="test-dynamic-image-processor" ) except HTTPError: pass def A__ ( self : Tuple ): lowercase__ = ViTImageProcessor.from_pretrained(__lowercase ) image_processor.push_to_hub("test-image-processor", use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowercase, getattr(__lowercase, __lowercase ) ) # Reset repo delete_repo(token=self._token, repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __lowercase, repo_id="test-image-processor", push_to_hub=__lowercase, use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowercase, getattr(__lowercase, __lowercase ) ) def A__ ( self : str ): lowercase__ = ViTImageProcessor.from_pretrained(__lowercase ) image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): 
self.assertEqual(__lowercase, getattr(__lowercase, __lowercase ) ) # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __lowercase, repo_id="valid_org/test-image-processor-org", push_to_hub=__lowercase, use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__lowercase, getattr(__lowercase, __lowercase ) ) def A__ ( self : List[str] ): CustomImageProcessor.register_for_auto_class() lowercase__ = CustomImageProcessor.from_pretrained(__lowercase ) image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, ) lowercase__ = AutoImageProcessor.from_pretrained( F'''{USER}/test-dynamic-image-processor''', trust_remote_code=__lowercase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor" )
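Mirroring what the hub tests above verify remotely, a local save/load round-trip can be sketched without any repository access (the tiny-random checkpoint name is the one already used in these tests):

import tempfile

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
with tempfile.TemporaryDirectory() as tmp_dir:
    processor.save_pretrained(tmp_dir)
    reloaded = ViTImageProcessor.from_pretrained(tmp_dir)
assert processor.to_dict() == reloaded.to_dict()  # config survives the round-trip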
224
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
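Usage sketch (the script filename and data paths are placeholders): fire exposes the function's signature as a command-line interface, and the same call can be made programmatically.

# python calculate_rouge_path.py predictions.txt references.txt --save_path metrics.json
metrics = calculate_rouge_path("predictions.txt", "references.txt", save_path="metrics.json")
print(metrics)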
224
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline A_ = logging.get_logger(__name__) # pylint: disable=invalid-name class lowercase( __a ): '''simple docstring''' def __init__( self: List[Any], a_: List[Any], a_: Tuple ): '''simple docstring''' super().__init__() self.register_modules(unet=a_, scheduler=a_ ) @torch.no_grad() def __call__( self: Optional[Any], a_: int = 1, a_: int = 100, a_: Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_: Optional[float] = None, a_: bool = True, ): '''simple docstring''' if audio_length_in_s is None: _snake_case : Dict = self.unet.config.sample_size / self.unet.config.sample_rate _snake_case : Any = audio_length_in_s * self.unet.config.sample_rate _snake_case : int = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) _snake_case : str = int(a_ ) if sample_size % down_scale_factor != 0: _snake_case : Optional[Any] = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" """ process.""" ) _snake_case : List[Any] = int(a_ ) _snake_case : Dict = next(iter(self.unet.parameters() ) ).dtype _snake_case : Any = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(a_, a_ ) and len(a_ ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(a_ )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) _snake_case : Union[str, Any] = randn_tensor(a_, generator=a_, device=self.device, dtype=a_ ) # set step values self.scheduler.set_timesteps(a_, device=audio.device ) _snake_case : Union[str, Any] = self.scheduler.timesteps.to(a_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output _snake_case : Any = self.unet(a_, a_ ).sample # 2. compute previous image: x_t -> t_t-1 _snake_case : int = self.scheduler.step(a_, a_, a_ ).prev_sample _snake_case : Any = audio.clamp(-1, 1 ).float().cpu().numpy() _snake_case : str = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=a_ )
64
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class lowercase( __a ): '''simple docstring''' lowercase__ = ["image_processor", "tokenizer"] lowercase__ = "AutoImageProcessor" lowercase__ = "AutoTokenizer" def __init__( self: List[str], a_: List[str]=None, a_: Tuple=None, **a_: Tuple ): '''simple docstring''' _snake_case : str = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""", a_, ) _snake_case : str = kwargs.pop("""feature_extractor""" ) _snake_case : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(a_, a_ ) _snake_case : Dict = self.image_processor _snake_case : Any = False def __call__( self: Any, *a_: Any, **a_: Tuple ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*a_, **a_ ) _snake_case : Dict = kwargs.pop("""images""", a_ ) _snake_case : Optional[Any] = kwargs.pop("""text""", a_ ) if len(a_ ) > 0: _snake_case : Optional[int] = args[0] _snake_case : Tuple = args[1:] if images is None and text is None: raise ValueError("""You need to specify either an `images` or `text` input to process.""" ) if images is not None: _snake_case : Tuple = self.image_processor(a_, *a_, **a_ ) if text is not None: _snake_case : Tuple = self.tokenizer(a_, **a_ ) if text is None: return inputs elif images is None: return encodings else: _snake_case : List[str] = encodings["""input_ids"""] return inputs def UpperCamelCase_ ( self: Optional[int], *a_: Tuple, **a_: List[str] ): '''simple docstring''' return self.tokenizer.batch_decode(*a_, **a_ ) def UpperCamelCase_ ( self: int, *a_: List[str], **a_: int ): '''simple docstring''' return self.tokenizer.decode(*a_, **a_ ) @contextmanager def UpperCamelCase_ ( self: Dict ): '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your images inputs, or in a separate call.""" ) _snake_case : Any = True _snake_case : Optional[int] = self.tokenizer yield _snake_case : int = self.image_processor _snake_case : Optional[int] = False def UpperCamelCase_ ( self: Dict, a_: Optional[Any], a_: str=False, a_: Optional[Any]=None ): '''simple docstring''' if added_vocab is None: _snake_case : Dict = self.tokenizer.get_added_vocab() _snake_case : str = {} while tokens: _snake_case : Union[str, Any] = re.search(r"""<s_(.*?)>""", a_, re.IGNORECASE ) if start_token is None: break _snake_case : List[Any] = start_token.group(1 ) _snake_case : str = re.search(rf"</s_{key}>", a_, re.IGNORECASE ) _snake_case : Dict = start_token.group() if end_token is None: _snake_case : List[Any] = tokens.replace(a_, """""" ) else: _snake_case : List[str] = end_token.group() _snake_case : str = re.escape(a_ ) _snake_case : str = re.escape(a_ ) _snake_case : Union[str, Any] = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", a_, re.IGNORECASE ) if content is not None: _snake_case : int = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node _snake_case : List[Any] = self.tokenajson(a_, is_inner_value=a_, added_vocab=a_ ) if value: if len(a_ ) == 1: _snake_case : List[str] = value[0] _snake_case : List[str] = value else: # leaf nodes _snake_case : Tuple = [] for leaf in content.split(r"""<sep/>""" ): _snake_case : Tuple = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": _snake_case : int = leaf[1:-2] # for categorical special tokens output[key].append(a_ ) if len(output[key] ) == 1: _snake_case : int = output[key][0] _snake_case : Any = tokens[tokens.find(a_ ) + len(a_ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:], is_inner_value=a_, added_vocab=a_ ) if len(a_ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""", a_, ) return self.image_processor_class @property def UpperCamelCase_ ( self: Tuple ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""", a_, ) return self.image_processor
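A toy illustration of the first parsing steps in tokenajson above; the markup is an assumed Donut-style example, not real model output:

import re

toy = "<s_name>latte</s_name>"
start = re.search(r"<s_(.*?)>", toy, re.IGNORECASE)
key = start.group(1)                                                   # "name"
content = re.search(rf"<s_{key}>(.*?)</s_{key}>", toy, re.IGNORECASE)
print({key: content.group(1).strip()})                                 # {'name': 'latte'}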
64
1
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = OrderedDict( [ ('align', 'EfficientNetImageProcessor'), ('beit', 'BeitImageProcessor'), ('bit', 'BitImageProcessor'), ('blip', 'BlipImageProcessor'), ('blip-2', 'BlipImageProcessor'), ('bridgetower', 'BridgeTowerImageProcessor'), ('chinese_clip', 'ChineseCLIPImageProcessor'), ('clip', 'CLIPImageProcessor'), ('clipseg', 'ViTImageProcessor'), ('conditional_detr', 'ConditionalDetrImageProcessor'), ('convnext', 'ConvNextImageProcessor'), ('convnextv2', 'ConvNextImageProcessor'), ('cvt', 'ConvNextImageProcessor'), ('data2vec-vision', 'BeitImageProcessor'), ('deformable_detr', 'DeformableDetrImageProcessor'), ('deit', 'DeiTImageProcessor'), ('deta', 'DetaImageProcessor'), ('detr', 'DetrImageProcessor'), ('dinat', 'ViTImageProcessor'), ('donut-swin', 'DonutImageProcessor'), ('dpt', 'DPTImageProcessor'), ('efficientformer', 'EfficientFormerImageProcessor'), ('efficientnet', 'EfficientNetImageProcessor'), ('flava', 'FlavaImageProcessor'), ('focalnet', 'BitImageProcessor'), ('git', 'CLIPImageProcessor'), ('glpn', 'GLPNImageProcessor'), ('groupvit', 'CLIPImageProcessor'), ('imagegpt', 'ImageGPTImageProcessor'), ('instructblip', 'BlipImageProcessor'), ('layoutlmv2', 'LayoutLMv2ImageProcessor'), ('layoutlmv3', 'LayoutLMv3ImageProcessor'), ('levit', 'LevitImageProcessor'), ('mask2former', 'Mask2FormerImageProcessor'), ('maskformer', 'MaskFormerImageProcessor'), ('mgp-str', 'ViTImageProcessor'), ('mobilenet_v1', 'MobileNetV1ImageProcessor'), ('mobilenet_v2', 'MobileNetV2ImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevitv2', 'MobileViTImageProcessor'), ('nat', 'ViTImageProcessor'), ('oneformer', 'OneFormerImageProcessor'), ('owlvit', 'OwlViTImageProcessor'), ('perceiver', 'PerceiverImageProcessor'), ('pix2struct', 'Pix2StructImageProcessor'), ('poolformer', 'PoolFormerImageProcessor'), ('regnet', 'ConvNextImageProcessor'), ('resnet', 'ConvNextImageProcessor'), ('sam', 'SamImageProcessor'), ('segformer', 'SegformerImageProcessor'), ('swiftformer', 'ViTImageProcessor'), ('swin', 'ViTImageProcessor'), ('swin2sr', 'Swin2SRImageProcessor'), ('swinv2', 'ViTImageProcessor'), ('table-transformer', 'DetrImageProcessor'), ('timesformer', 'VideoMAEImageProcessor'), ('tvlt', 'TvltImageProcessor'), ('upernet', 'SegformerImageProcessor'), ('van', 'ConvNextImageProcessor'), ('videomae', 'VideoMAEImageProcessor'), ('vilt', 'ViltImageProcessor'), ('vit', 'ViTImageProcessor'), ('vit_hybrid', 'ViTHybridImageProcessor'), ('vit_mae', 'ViTImageProcessor'), ('vit_msn', 'ViTImageProcessor'), ('xclip', 'CLIPImageProcessor'), ('yolos', 'YolosImageProcessor'), ] ) __UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def lowercase__ ( __snake_case : str ): '''simple docstring''' for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name 
in extractors: UpperCAmelCase_ : Optional[Any] = model_type_to_module_name(__snake_case ) UpperCAmelCase_ : Optional[Any] = importlib.import_module(F".{module_name}" , 'transformers.models' ) try: return getattr(__snake_case , __snake_case ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(__snake_case , '__name__' , __snake_case ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. UpperCAmelCase_ : Optional[Any] = importlib.import_module('transformers' ) if hasattr(__snake_case , __snake_case ): return getattr(__snake_case , __snake_case ) return None def lowercase__ ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : List[Any] , ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = get_file_from_repo( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) if resolved_config_file is None: logger.info( 'Could not locate the image processor configuration file, will try to use the model config instead.' ) return {} with open(__snake_case , encoding='utf-8' ) as reader: return json.load(__snake_case ) class lowerCamelCase : '''simple docstring''' def __init__( self ) -> Optional[Any]: raise EnvironmentError( 'AutoImageProcessor is designed to be instantiated ' 'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(_UpperCamelCase ) def __UpperCAmelCase ( cls , _UpperCamelCase , **_UpperCamelCase ) -> str: UpperCAmelCase_ : str = kwargs.pop('config' , _UpperCamelCase ) UpperCAmelCase_ : List[str] = kwargs.pop('trust_remote_code' , _UpperCamelCase ) UpperCAmelCase_ : List[Any] = True UpperCAmelCase_ , UpperCAmelCase_ : Any = ImageProcessingMixin.get_image_processor_dict(_UpperCamelCase , **_UpperCamelCase ) UpperCAmelCase_ : Dict = config_dict.get('image_processor_type' , _UpperCamelCase ) UpperCAmelCase_ : Union[str, Any] = None if "AutoImageProcessor" in config_dict.get('auto_map' , {} ): UpperCAmelCase_ : List[str] = config_dict['auto_map']['AutoImageProcessor'] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: UpperCAmelCase_ : Union[str, Any] = config_dict.pop('feature_extractor_type' , _UpperCamelCase ) if feature_extractor_class is not None: logger.warning( 'Could not find image processor class in the image processor config or the model config. Loading' ' based on pattern matching with the model\'s feature extractor configuration.' 
) UpperCAmelCase_ : Dict = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' ) if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ): UpperCAmelCase_ : str = config_dict['auto_map']['AutoFeatureExtractor'] UpperCAmelCase_ : Optional[Any] = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' ) logger.warning( 'Could not find image processor auto map in the image processor config or the model config.' ' Loading based on pattern matching with the model\'s feature extractor configuration.' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) # It could be in `config.image_processor_type`` UpperCAmelCase_ : Dict = getattr(_UpperCamelCase , 'image_processor_type' , _UpperCamelCase ) if hasattr(_UpperCamelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map: UpperCAmelCase_ : List[str] = config.auto_map['AutoImageProcessor'] if image_processor_class is not None: UpperCAmelCase_ : Union[str, Any] = image_processor_class_from_name(_UpperCamelCase ) UpperCAmelCase_ : str = image_processor_auto_map is not None UpperCAmelCase_ : Tuple = image_processor_class is not None or type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING UpperCAmelCase_ : Dict = resolve_trust_remote_code( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if has_remote_code and trust_remote_code: UpperCAmelCase_ : Tuple = get_class_from_dynamic_module( _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) UpperCAmelCase_ : Optional[Any] = kwargs.pop('code_revision' , _UpperCamelCase ) if os.path.isdir(_UpperCamelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING: UpperCAmelCase_ : List[str] = IMAGE_PROCESSOR_MAPPING[type(_UpperCamelCase )] return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) raise ValueError( f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a " f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following " f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}" ) @staticmethod def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]: IMAGE_PROCESSOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase )
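A minimal resolution example for the mapping above (the checkpoint is a real public ViT model, used here purely as an example):

from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # ViTImageProcessor, via IMAGE_PROCESSOR_MAPPING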
145
from typing import Callable, List, Optional, Tuple, Union import torch from transformers import CLIPTextModel, CLIPTokenizer from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin, TransformeraDModel, VQModel from ...schedulers import VQDiffusionScheduler from ...utils import logging from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase (_snake_case , _snake_case ): '''simple docstring''' @register_to_config def __init__( self , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None ) -> int: super().__init__() UpperCAmelCase_ : str = learnable if self.learnable: assert hidden_size is not None, "learnable=True requires `hidden_size` to be set" assert length is not None, "learnable=True requires `length` to be set" UpperCAmelCase_ : Optional[Any] = torch.zeros(_UpperCamelCase , _UpperCamelCase ) else: UpperCAmelCase_ : Any = None UpperCAmelCase_ : Any = torch.nn.Parameter(_UpperCamelCase ) class lowerCamelCase (_snake_case ): '''simple docstring''' _snake_case : VQModel _snake_case : CLIPTextModel _snake_case : CLIPTokenizer _snake_case : TransformeraDModel _snake_case : LearnedClassifierFreeSamplingEmbeddings _snake_case : VQDiffusionScheduler def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> List[Any]: super().__init__() self.register_modules( vqvae=_UpperCamelCase , transformer=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , scheduler=_UpperCamelCase , learned_classifier_free_sampling_embeddings=_UpperCamelCase , ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = len(_UpperCamelCase ) if isinstance(_UpperCamelCase , _UpperCamelCase ) else 1 # get prompt text embeddings UpperCAmelCase_ : str = self.tokenizer( _UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) UpperCAmelCase_ : Optional[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase_ : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) UpperCAmelCase_ : str = text_input_ids[:, : self.tokenizer.model_max_length] UpperCAmelCase_ : str = self.text_encoder(text_input_ids.to(self.device ) )[0] # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion. # While CLIP does normalize the pooled output of the text transformer when combining # the image and text embeddings, CLIP does not directly normalize the last hidden state. # # CLIP normalizing the pooled output. 
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053 UpperCAmelCase_ : Dict = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase ) # duplicate text embeddings for each generation per prompt UpperCAmelCase_ : Dict = prompt_embeds.repeat_interleave(_UpperCamelCase , dim=0 ) if do_classifier_free_guidance: if self.learned_classifier_free_sampling_embeddings.learnable: UpperCAmelCase_ : List[str] = self.learned_classifier_free_sampling_embeddings.embeddings UpperCAmelCase_ : List[str] = negative_prompt_embeds.unsqueeze(0 ).repeat(_UpperCamelCase , 1 , 1 ) else: UpperCAmelCase_ : List[Any] = [''] * batch_size UpperCAmelCase_ : List[Any] = text_input_ids.shape[-1] UpperCAmelCase_ : Dict = self.tokenizer( _UpperCamelCase , padding='max_length' , max_length=_UpperCamelCase , truncation=_UpperCamelCase , return_tensors='pt' , ) UpperCAmelCase_ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # See comment for normalizing text embeddings UpperCAmelCase_ : Dict = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_UpperCamelCase ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase_ : List[Any] = negative_prompt_embeds.shape[1] UpperCAmelCase_ : Dict = negative_prompt_embeds.repeat(1 , _UpperCamelCase , 1 ) UpperCAmelCase_ : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase_ : Union[str, Any] = torch.cat([negative_prompt_embeds, prompt_embeds] ) return prompt_embeds @torch.no_grad() def __call__( self , _UpperCamelCase , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 5.0 , _UpperCamelCase = 1.0 , _UpperCamelCase = 1 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , _UpperCamelCase = None , _UpperCamelCase = 1 , ) -> Union[ImagePipelineOutput, Tuple]: if isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Any = 1 elif isinstance(_UpperCamelCase , _UpperCamelCase ): UpperCAmelCase_ : Tuple = len(_UpperCamelCase ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_UpperCamelCase )}" ) UpperCAmelCase_ : Union[str, Any] = batch_size * num_images_per_prompt UpperCAmelCase_ : Optional[int] = guidance_scale > 1.0 UpperCAmelCase_ : Any = self._encode_prompt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(_UpperCamelCase , _UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(_UpperCamelCase )}." ) # get the initial completely masked latents unless the user supplied it UpperCAmelCase_ : Optional[int] = (batch_size, self.transformer.num_latent_pixels) if latents is None: UpperCAmelCase_ : Tuple = self.transformer.num_vector_embeds - 1 UpperCAmelCase_ : List[Any] = torch.full(_UpperCamelCase , _UpperCamelCase ).to(self.device ) else: if latents.shape != latents_shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" ) if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any(): raise ValueError( 'Unexpected latents value(s). 
All latents be valid embedding indices i.e. in the range 0,' f" {self.transformer.num_vector_embeds - 1} (inclusive)." ) UpperCAmelCase_ : Any = latents.to(self.device ) # set timesteps self.scheduler.set_timesteps(_UpperCamelCase , device=self.device ) UpperCAmelCase_ : List[str] = self.scheduler.timesteps.to(self.device ) UpperCAmelCase_ : Union[str, Any] = latents for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ): # expand the sample if we are doing classifier free guidance UpperCAmelCase_ : Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample # predict the un-noised image # model_output == `log_p_x_0` UpperCAmelCase_ : Dict = self.transformer(_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , timestep=_UpperCamelCase ).sample if do_classifier_free_guidance: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_output.chunk(2 ) UpperCAmelCase_ : Optional[int] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond) model_output -= torch.logsumexp(_UpperCamelCase , dim=1 , keepdim=_UpperCamelCase ) UpperCAmelCase_ : str = self.truncate(_UpperCamelCase , _UpperCamelCase ) # remove `log(0)`'s (`-inf`s) UpperCAmelCase_ : Optional[int] = model_output.clamp(-7_0 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase_ : Union[str, Any] = self.scheduler.step(_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , generator=_UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) UpperCAmelCase_ : str = self.vqvae.config.vq_embed_dim UpperCAmelCase_ : Optional[int] = (batch_size, self.transformer.height, self.transformer.width, embedding_channels) UpperCAmelCase_ : int = self.vqvae.quantize.get_codebook_entry(_UpperCamelCase , shape=_UpperCamelCase ) UpperCAmelCase_ : Dict = self.vqvae.decode(_UpperCamelCase , force_not_quantize=_UpperCamelCase ).sample UpperCAmelCase_ : List[str] = (image / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : Any = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase_ : int = self.numpy_to_pil(_UpperCamelCase ) if not return_dict: return (image,) return ImagePipelineOutput(images=_UpperCamelCase ) def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> torch.FloatTensor: UpperCAmelCase_ , UpperCAmelCase_ : int = torch.sort(_UpperCamelCase , 1 , descending=_UpperCamelCase ) UpperCAmelCase_ : Dict = torch.exp(_UpperCamelCase ) UpperCAmelCase_ : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate # Ensure that at least the largest probability is not zeroed out UpperCAmelCase_ : Tuple = torch.full_like(keep_mask[:, 0:1, :] , _UpperCamelCase ) UpperCAmelCase_ : List[str] = torch.cat((all_true, keep_mask) , dim=1 ) UpperCAmelCase_ : int = keep_mask[:, :-1, :] UpperCAmelCase_ : Any = keep_mask.gather(1 , indices.argsort(1 ) ) UpperCAmelCase_ : str = log_p_x_0.clone() UpperCAmelCase_ : Any = -torch.inf # -inf = log(0) return rv
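The truncate step above keeps the smallest set of classes whose cumulative probability reaches truncation_rate and masks the rest; a self-contained toy check of that logic (shapes and probabilities are made up):

import torch

log_p = torch.log(torch.tensor([[[0.6], [0.3], [0.1]]]))  # (batch, classes, pixels)
sorted_lp, idx = torch.sort(log_p, 1, descending=True)
keep = torch.exp(sorted_lp).cumsum(dim=1) < 0.8           # truncation_rate = 0.8
keep = torch.cat((torch.full_like(keep[:, 0:1, :], True), keep), dim=1)[:, :-1, :]
keep = keep.gather(1, idx.argsort(1))
print(keep.squeeze())  # tensor([ True,  True, False]): the 0.1 class is truncated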
145
1
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : Tuple = { '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''deta''' lowerCAmelCase = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=900 , _UpperCAmelCase=2048 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=1024 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=True , _UpperCAmelCase=300 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.25 , **_UpperCAmelCase , ): '''simple docstring''' if backbone_config is None: logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.') __A : Dict = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4']) else: if isinstance(_UpperCAmelCase , _UpperCAmelCase): __A : List[str] = backbone_config.pop('model_type') __A : str = CONFIG_MAPPING[backbone_model_type] __A : Any = config_class.from_dict(_UpperCAmelCase) __A : int = backbone_config __A : List[str] = num_queries __A : str = max_position_embeddings __A : Optional[int] = d_model __A : Any = encoder_ffn_dim __A : int = encoder_layers __A : str = encoder_attention_heads __A : Dict = decoder_ffn_dim __A : Optional[Any] = decoder_layers __A : List[str] = decoder_attention_heads __A : int = dropout __A : int = attention_dropout __A : int = activation_dropout __A : int = activation_function __A : Any = init_std __A : List[Any] = init_xavier_std __A : List[str] = encoder_layerdrop __A : Optional[Any] = auxiliary_loss __A : str = position_embedding_type # deformable attributes __A : Optional[Any] = num_feature_levels __A : Optional[int] = encoder_n_points __A : int = decoder_n_points __A : int = two_stage __A : Union[str, Any] = two_stage_num_proposals __A : Union[str, Any] = with_box_refine __A : Dict = assign_first_stage if two_stage is True and with_box_refine is False: raise ValueError('If two_stage is True, with_box_refine must be True.') # Hungarian matcher __A : int = class_cost __A : str = bbox_cost __A : str = giou_cost # Loss coefficients __A : Tuple = mask_loss_coefficient __A : Optional[Any] = dice_loss_coefficient __A : int = bbox_loss_coefficient __A : str = giou_loss_coefficient __A : List[str] = eos_coefficient __A : Union[str, Any] = focal_alpha super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase) @property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return self.encoder_attention_heads @property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' return self.d_model def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' __A : Optional[Any] = copy.deepcopy(self.__dict__) __A : int = self.backbone_config.to_dict() __A : Dict = 
self.__class__.model_type return output
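A quick instantiation sketch, assuming the published class name DetaConfig and reading the defaults off the signature above (no pretrained weights are involved):

from transformers import DetaConfig

config = DetaConfig()  # falls back to a default ResNet backbone config
print(config.num_queries, config.d_model, config.num_attention_heads)  # 900 256 8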
190
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ : List[str] = { '''configuration_git''': ['''GIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GitConfig''', '''GitVisionConfig'''], '''processing_git''': ['''GitProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Any = [ '''GIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GitForCausalLM''', '''GitModel''', '''GitPreTrainedModel''', '''GitVisionModel''', ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys lowercase__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
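The payoff of the _LazyModule pattern above is that importing the package is cheap; the torch-backed symbols are only imported on first attribute access, as in this sketch:

import transformers.models.git as git_module

config_cls = git_module.GitConfig  # the real submodule import happens here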
190
1
import argparse import os import re import packaging.version lowercase_ = "examples/" lowercase_ = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } lowercase_ = { "init": "src/transformers/__init__.py", "setup": "setup.py", } lowercase_ = "README.md" def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ): '''simple docstring''' with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __snake_case : int = f.read() __snake_case , __snake_case : int = REPLACE_PATTERNS[pattern] __snake_case : Tuple = replace.replace("""VERSION""" , __SCREAMING_SNAKE_CASE ) __snake_case : Optional[Any] = re_pattern.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' for folder, directories, fnames in os.walk(__SCREAMING_SNAKE_CASE ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , pattern="""examples""" ) def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]=False ): '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not patch: update_version_in_examples(__SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( ): '''simple docstring''' __snake_case : Union[str, Any] = """🤗 Transformers currently provides the following architectures""" __snake_case : Optional[Any] = """1. Want to contribute a new model?""" with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: __snake_case : str = f.readlines() # Find the start of the list. __snake_case : Tuple = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __snake_case : List[str] = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): __snake_case : str = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( ): '''simple docstring''' with open(REPLACE_FILES["""init"""] , """r""" ) as f: __snake_case : Any = f.read() __snake_case : Tuple = REPLACE_PATTERNS["""init"""][0].search(__SCREAMING_SNAKE_CASE ).groups()[0] return packaging.version.parse(__SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( __SCREAMING_SNAKE_CASE : List[Any]=False ): '''simple docstring''' __snake_case : Optional[Any] = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: __snake_case : str = default_version.base_version elif patch: __snake_case : Tuple = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: __snake_case : Optional[int] = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. __snake_case : Optional[int] = input(F'''Which version are you releasing? [{default_version}]''' ) if len(__SCREAMING_SNAKE_CASE ) == 0: __snake_case : Union[str, Any] = default_version print(F'''Updating version to {version}.''' ) global_version_update(__SCREAMING_SNAKE_CASE , patch=__SCREAMING_SNAKE_CASE ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def __lowerCAmelCase ( ): '''simple docstring''' __snake_case : Union[str, Any] = get_version() __snake_case : int = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' __snake_case : Optional[int] = current_version.base_version # Check with the user we got that right. __snake_case : Dict = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(__SCREAMING_SNAKE_CASE ) == 0: __snake_case : Optional[Any] = dev_version print(F'''Updating version to {version}.''' ) global_version_update(__SCREAMING_SNAKE_CASE ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") lowercase_ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
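A quick check of the "init" pattern above on a sample line (version strings are illustrative):

import re

pattern = re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE)
print(pattern.sub("__version__ = \"4.31.0\"\n", '__version__ = "4.31.0.dev0"'))
# -> __version__ = "4.31.0"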
20
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
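Worked example: a 100 VA load at power factor 0.9 splits into 90 W of real power and about 43.59 var of reactive power.

print(real_power(100, 0.9))                 # 90.0
print(round(reactive_power(100, 0.9), 2))   # 43.59, i.e. 100 * sqrt(1 - 0.81)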
20
1
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase ( snake_case_ , unittest.TestCase ): UpperCamelCase : Optional[int] = BertTokenizer UpperCamelCase : str = BertTokenizerFast UpperCamelCase : List[Any] = True UpperCamelCase : str = True UpperCamelCase : Any = filter_non_english def _lowercase ( self : int ) -> List[Any]: super().setUp() _a : Union[str, Any] = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] _a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Tuple , UpperCAmelCase__ : str ) -> Any: _a : Dict = """UNwant\u00E9d,running""" _a : Tuple = """unwanted, running""" return input_text, output_text def _lowercase ( self : Optional[int] ) -> Optional[Any]: _a : Optional[Any] = self.tokenizer_class(self.vocab_file ) _a : Dict = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCAmelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [9, 6, 7, 12, 10, 11] ) def _lowercase ( self : Tuple ) -> List[Any]: if not self.test_rust_tokenizer: return _a : Union[str, Any] = self.get_tokenizer() _a : Tuple = self.get_rust_tokenizer() _a : str = """UNwant\u00E9d,running""" _a : Union[str, Any] = tokenizer.tokenize(UpperCAmelCase__ ) _a : Any = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) _a : Dict = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _a : int = self.get_rust_tokenizer() _a : int = tokenizer.encode(UpperCAmelCase__ ) _a : List[Any] = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # With lower casing _a : str = self.get_tokenizer(do_lower_case=UpperCAmelCase__ ) _a : Tuple = self.get_rust_tokenizer(do_lower_case=UpperCAmelCase__ ) _a : List[str] = """UNwant\u00E9d,running""" _a : Optional[Any] = tokenizer.tokenize(UpperCAmelCase__ ) _a : List[Any] = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) _a : Union[str, Any] = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Optional[int] = self.get_rust_tokenizer() _a : Any = tokenizer.encode(UpperCAmelCase__ ) _a : List[str] = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) -> List[str]: _a : str = BasicTokenizer() 
        self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )

    def _lowercase ( self : List[Any] ) -> Optional[int]:
        _a : Optional[Any] = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def _lowercase ( self : int ) -> Tuple:
        _a : Optional[Any] = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )

    def _lowercase ( self : Any ) -> Dict:
        _a : str = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def _lowercase ( self : str ) -> Dict:
        _a : Optional[int] = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
        self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )

    def _lowercase ( self : str ) -> List[str]:
        _a : Union[str, Any] = BasicTokenizer(do_lower_case=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def _lowercase ( self : Optional[Any] ) -> int:
        _a : Optional[int] = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def _lowercase ( self : Optional[int] ) -> Union[str, Any]:
        _a : str = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )

    def _lowercase ( self : Optional[int] ) -> Tuple:
        _a : Tuple = BasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["""[UNK]"""] )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )

    def _lowercase ( self : Optional[Any] ) -> int:
        _a : str = BasicTokenizer()
        _a : Union[str, Any] = """a\n'll !!to?'d of, can't."""
        _a : Optional[Any] = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""]
        self.assertListEqual(tokenizer.tokenize(UpperCAmelCase__ ) , UpperCAmelCase__ )

    def _lowercase ( self : List[str] ) -> Dict:
        _a : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
        _a : Tuple = {}
        for i, token in enumerate(UpperCAmelCase__ ):
            _a : Optional[Any] = i
        _a : List[Any] = WordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="""[UNK]""" )
        self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
        self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )

    def _lowercase ( self : int ) -> List[str]:
        self.assertTrue(_is_whitespace(""" """ ) )
        self.assertTrue(_is_whitespace("""\t""" ) )
        self.assertTrue(_is_whitespace("""\r""" ) )
        self.assertTrue(_is_whitespace("""\n""" ) )
        self.assertTrue(_is_whitespace("""\u00A0""" ) )
        self.assertFalse(_is_whitespace("""A""" ) )
        self.assertFalse(_is_whitespace("""-""" ) )

    def _lowercase ( self : Dict ) -> str:
        self.assertTrue(_is_control("""\u0005""" ) )
        self.assertFalse(_is_control("""A""" ) )
        self.assertFalse(_is_control(""" """ ) )
        self.assertFalse(_is_control("""\t""" ) )
        self.assertFalse(_is_control("""\r""" ) )

    def _lowercase ( self : Any ) -> str:
        self.assertTrue(_is_punctuation("""-""" ) )
        self.assertTrue(_is_punctuation("""$""" ) )
        self.assertTrue(_is_punctuation("""`""" ) )
        self.assertTrue(_is_punctuation(""".""" ) )
        self.assertFalse(_is_punctuation("""A""" ) )
        self.assertFalse(_is_punctuation(""" """ ) )

    def _lowercase ( self : Union[str, Any] ) -> Tuple:
        _a : Optional[int] = self.get_tokenizer()
        _a : Optional[Any] = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )

    @slow
    def _lowercase ( self : Tuple ) -> Optional[int]:
        _a : Any = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
        _a : Any = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase__ )
        _a : Optional[int] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase__ )
        _a : Tuple = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
        _a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def _lowercase ( self : Dict ) -> int:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                _a : Optional[Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                _a : str = tokenizer_r.encode_plus(
                    UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , )
                _a : List[str] = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , """do_lower_case""" ) else False
                _a : List[str] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """A"""),
                        ((1, 2), ""","""),
                        ((3, 5), """na"""),
                        ((5, 6), """##ï"""),
                        ((6, 8), """##ve"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """Allen"""),
                        ((21, 23), """##NL"""),
                        ((23, 24), """##P"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), """a"""),
                        ((1, 2), ""","""),
                        ((3, 8), """naive"""),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), """allen"""),
                        ((21, 23), """##nl"""),
                        ((23, 24), """##p"""),
                        ((25, 33), """sentence"""),
                        ((33, 34), """."""),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )

    def _lowercase ( self : Optional[int] ) -> Any:
        _a : str = ["""的""", """人""", """有"""]
        _a : Optional[int] = """""".join(UpperCAmelCase__ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                _a : List[str] = True
                _a : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                _a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                _a : List[str] = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                _a : str = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                _a : int = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
                _a : Optional[int] = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )

                _a : List[str] = False
                _a : List[Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                _a : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
                _a : Any = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                _a : int = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
                _a : Optional[int] = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ )
                _a : List[Any] = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ )
                # it is expected that only the first Chinese character is not preceded by "##".
                _a : Any = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ )
                ]
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
                self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
294
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict: if k in (0.0_4, 0.0_6): _a : List[str] = k _a : List[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Dict ) -> str: return str(self.k ) def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]: _a : Dict = cva.imread(UpperCAmelCase__ , 0 ) _a , _a : List[Any] = img.shape _a : list[list[int]] = [] _a : List[Any] = img.copy() _a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB ) _a , _a : Any = np.gradient(UpperCAmelCase__ ) _a : Tuple = dx**2 _a : Union[str, Any] = dy**2 _a : Union[str, Any] = dx * dy _a : int = 0.0_4 _a : List[str] = self.window_size // 2 for y in range(UpperCAmelCase__ , h - offset ): for x in range(UpperCAmelCase__ , w - offset ): _a : str = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : List[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Tuple = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Any = (wxx * wyy) - (wxy**2) _a : Tuple = wxx + wyy _a : Any = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _snake_case = HarrisCorner(0.04, 3) _snake_case , _snake_case = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
294
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


a_ : int = {
    """configuration_clip""": [
        """CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """CLIPConfig""",
        """CLIPOnnxConfig""",
        """CLIPTextConfig""",
        """CLIPVisionConfig""",
    ],
    """processing_clip""": ["""CLIPProcessor"""],
    """tokenization_clip""": ["""CLIPTokenizer"""],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : Tuple = ["""CLIPTokenizerFast"""]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : int = ["""CLIPFeatureExtractor"""]
    a_ : List[str] = ["""CLIPImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[Any] = [
        """CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """CLIPModel""",
        """CLIPPreTrainedModel""",
        """CLIPTextModel""",
        """CLIPTextModelWithProjection""",
        """CLIPVisionModel""",
        """CLIPVisionModelWithProjection""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : int = [
        """TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFCLIPModel""",
        """TFCLIPPreTrainedModel""",
        """TFCLIPTextModel""",
        """TFCLIPVisionModel""",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_ : List[str] = [
        """FlaxCLIPModel""",
        """FlaxCLIPPreTrainedModel""",
        """FlaxCLIPTextModel""",
        """FlaxCLIPTextPreTrainedModel""",
        """FlaxCLIPVisionModel""",
        """FlaxCLIPVisionPreTrainedModel""",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    a_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
6
'''simple docstring'''
import argparse
import os

# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size


########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################


a_ : Tuple = 16
a_ : Optional[int] = 32


def a_ ( __snake_case : Accelerator , __snake_case : int = 16 ) -> str:
    """simple docstring"""
    lowerCamelCase_ =AutoTokenizer.from_pretrained('''bert-base-cased''' )
    lowerCamelCase_ =load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(__snake_case : int ):
        # max_length=None => use the model max length (it's actually the default)
        lowerCamelCase_ =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        lowerCamelCase_ =datasets.map(
            __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    lowerCamelCase_ =tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(__snake_case : Any ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        lowerCamelCase_ =128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            lowerCamelCase_ =16
        elif accelerator.mixed_precision != "no":
            lowerCamelCase_ =8
        else:
            lowerCamelCase_ =None

        return tokenizer.pad(
            __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    lowerCamelCase_ =DataLoader(
        tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )
    lowerCamelCase_ =DataLoader(
        tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    a_ : Tuple = mocked_dataloaders  # noqa: F811


def a_ ( __snake_case : List[str] , __snake_case : Tuple ) -> Optional[Any]:
    """simple docstring"""
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __snake_case ) == "1":
        lowerCamelCase_ =2
    # Initialize accelerator
    lowerCamelCase_ =Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lowerCamelCase_ =config['''lr''']
    lowerCamelCase_ =int(config['''num_epochs'''] )
    lowerCamelCase_ =int(config['''seed'''] )
    lowerCamelCase_ =int(config['''batch_size'''] )
    lowerCamelCase_ =evaluate.load('''glue''' , '''mrpc''' )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=__snake_case )
    def inner_training_loop(__snake_case : Union[str, Any] ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(__snake_case )

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        lowerCamelCase_ =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        lowerCamelCase_ =model.to(accelerator.device )

        # Instantiate optimizer
        lowerCamelCase_ =AdamW(params=model.parameters() , lr=__snake_case )
        lowerCamelCase_, lowerCamelCase_ =get_dataloaders(__snake_case , __snake_case )

        # Instantiate scheduler
        lowerCamelCase_ =get_linear_schedule_with_warmup(
            optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) , )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =accelerator.prepare(
            __snake_case , __snake_case , __snake_case , __snake_case , __snake_case )

        # Now we train the model
        for epoch in range(__snake_case ):
            model.train()
            for step, batch in enumerate(__snake_case ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                lowerCamelCase_ =model(**__snake_case )
                lowerCamelCase_ =outputs.loss
                accelerator.backward(__snake_case )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(__snake_case ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    lowerCamelCase_ =model(**__snake_case )
                lowerCamelCase_ =outputs.logits.argmax(dim=-1 )
                lowerCamelCase_, lowerCamelCase_ =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=__snake_case , references=__snake_case , )

            lowerCamelCase_ =metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'''epoch {epoch}:''' , __snake_case )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def a_ ( ) -> Dict:
    """simple docstring"""
    lowerCamelCase_ =argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    lowerCamelCase_ =parser.parse_args()
    lowerCamelCase_ ={'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(__snake_case , __snake_case )


if __name__ == "__main__":
    main()
6
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __A = logging.get_logger(__name__) def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str: __lowerCAmelCase: Union[str, Any] = b.T __lowerCAmelCase: str = np.sum(np.square(__SCREAMING_SNAKE_CASE ) , axis=1 ) __lowerCAmelCase: Optional[Any] = np.sum(np.square(__SCREAMING_SNAKE_CASE ) , axis=0 ) __lowerCAmelCase: int = np.matmul(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __lowerCAmelCase: int = aa[:, None] - 2 * ab + ba[None, :] return d def a__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict: __lowerCAmelCase: Union[str, Any] = x.reshape(-1 , 3 ) __lowerCAmelCase: Optional[Any] = squared_euclidean_distance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return np.argmin(__SCREAMING_SNAKE_CASE , axis=1 ) class snake_case ( __snake_case ): SCREAMING_SNAKE_CASE_ : Optional[Any] = ["""pixel_values"""] def __init__( self : int , UpperCamelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , **UpperCamelCase__ : Union[str, Any] , )-> None: '''simple docstring''' super().__init__(**UpperCamelCase__) __lowerCAmelCase: Any = size if size is not None else {"height": 2_5_6, "width": 2_5_6} __lowerCAmelCase: int = get_size_dict(UpperCamelCase__) __lowerCAmelCase: List[Any] = np.array(UpperCamelCase__) if clusters is not None else None __lowerCAmelCase: int = do_resize __lowerCAmelCase: int = size __lowerCAmelCase: Tuple = resample __lowerCAmelCase: Union[str, Any] = do_normalize __lowerCAmelCase: Union[str, Any] = do_color_quantize def lowercase_ ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , )-> np.ndarray: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = get_size_dict(UpperCamelCase__) if "height" not in size or "width" not in size: raise ValueError(f"Size dictionary must contain both height and width keys. 
Got {size.keys()}") return resize( UpperCamelCase__ , size=(size["height"], size["width"]) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__) def lowercase_ ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , )-> np.ndarray: '''simple docstring''' __lowerCAmelCase: Dict = rescale(image=UpperCamelCase__ , scale=1 / 127.5 , data_format=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = image - 1 return image def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , )-> PIL.Image.Image: '''simple docstring''' __lowerCAmelCase: Optional[int] = do_resize if do_resize is not None else self.do_resize __lowerCAmelCase: List[str] = size if size is not None else self.size __lowerCAmelCase: Any = get_size_dict(UpperCamelCase__) __lowerCAmelCase: List[Any] = resample if resample is not None else self.resample __lowerCAmelCase: int = do_normalize if do_normalize is not None else self.do_normalize __lowerCAmelCase: Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize __lowerCAmelCase: Optional[Any] = clusters if clusters is not None else self.clusters __lowerCAmelCase: Tuple = np.array(UpperCamelCase__) __lowerCAmelCase: Tuple = make_list_of_images(UpperCamelCase__) if not valid_images(UpperCamelCase__): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray.") if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_color_quantize and clusters is None: raise ValueError("Clusters must be specified if do_color_quantize is True.") # All transformations expect numpy arrays. __lowerCAmelCase: Optional[Any] = [to_numpy_array(UpperCamelCase__) for image in images] if do_resize: __lowerCAmelCase: Union[str, Any] = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__) for image in images] if do_normalize: __lowerCAmelCase: List[Any] = [self.normalize(image=UpperCamelCase__) for image in images] if do_color_quantize: __lowerCAmelCase: Union[str, Any] = [to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.LAST) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) __lowerCAmelCase: int = np.array(UpperCamelCase__) __lowerCAmelCase: int = color_quantize(UpperCamelCase__ , UpperCamelCase__).reshape(images.shape[:-1]) # flatten to (batch_size, height*width) __lowerCAmelCase: Optional[Any] = images.shape[0] __lowerCAmelCase: int = images.reshape(UpperCamelCase__ , -1) # We need to convert back to a list of images to keep consistent behaviour across processors. __lowerCAmelCase: Union[str, Any] = list(UpperCamelCase__) else: __lowerCAmelCase: Optional[Any] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__) for image in images] __lowerCAmelCase: int = {"input_ids": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__)
217
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( __snake_case, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[str] = None SCREAMING_SNAKE_CASE_ : Tuple = BloomTokenizerFast SCREAMING_SNAKE_CASE_ : str = BloomTokenizerFast SCREAMING_SNAKE_CASE_ : int = True SCREAMING_SNAKE_CASE_ : Tuple = False SCREAMING_SNAKE_CASE_ : int = """tokenizer_file""" SCREAMING_SNAKE_CASE_ : List[str] = {"""bos_token""": """<s>""", """eos_token""": """</s>""", """unk_token""": """<unk>""", """pad_token""": """<pad>"""} def lowercase_ ( self : List[Any])-> Dict: '''simple docstring''' super().setUp() __lowerCAmelCase: Optional[Any] = BloomTokenizerFast.from_pretrained("bigscience/tokenizer") tokenizer.save_pretrained(self.tmpdirname) def lowercase_ ( self : List[Any] , **UpperCamelCase__ : Union[str, Any])-> Optional[Any]: '''simple docstring''' kwargs.update(self.special_tokens_map) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__) def lowercase_ ( self : Union[str, Any])-> Union[str, Any]: '''simple docstring''' __lowerCAmelCase: str = self.get_rust_tokenizer() __lowerCAmelCase: int = ["The quick brown fox</s>", "jumps over the lazy dog</s>"] __lowerCAmelCase: List[str] = [[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]] __lowerCAmelCase: List[str] = tokenizer.batch_encode_plus(UpperCamelCase__)["input_ids"] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__) __lowerCAmelCase: List[Any] = tokenizer.batch_decode(UpperCamelCase__) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__) def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Tuple=6)-> Tuple: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): __lowerCAmelCase: Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input __lowerCAmelCase: Dict = "This is a simple input" __lowerCAmelCase: str = ["This is a simple input 1", "This is a simple input 2"] __lowerCAmelCase: int = ("This is a simple input", "This is a pair") __lowerCAmelCase: Union[str, Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests try: tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__) tokenizer_r.encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__) tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__) tokenizer_r.encode(UpperCamelCase__ , max_length=UpperCamelCase__) tokenizer_r.batch_encode_plus(UpperCamelCase__ , max_length=UpperCamelCase__) except ValueError: self.fail("Bloom Tokenizer should be able to deal with padding") __lowerCAmelCase: Tuple = None # Hotfixing padding = None self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length") # Simple input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length") # Simple input self.assertRaises( UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , 
padding="max_length" , ) # Pair input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length") # Pair input self.assertRaises(UpperCamelCase__ , tokenizer_r.encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length") # Pair input self.assertRaises( UpperCamelCase__ , tokenizer_r.batch_encode_plus , UpperCamelCase__ , max_length=UpperCamelCase__ , padding="max_length" , ) def lowercase_ ( self : Optional[Any])-> List[str]: '''simple docstring''' __lowerCAmelCase: Dict = self.get_rust_tokenizer() __lowerCAmelCase: List[str] = load_dataset("xnli" , "all_languages" , split="test" , streaming=UpperCamelCase__) __lowerCAmelCase: Union[str, Any] = next(iter(UpperCamelCase__))["premise"] # pick up one data __lowerCAmelCase: Any = list(sample_data.values()) __lowerCAmelCase: int = list(map(tokenizer.encode , UpperCamelCase__)) __lowerCAmelCase: str = [tokenizer.decode(UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__) for x in output_tokens] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__) def lowercase_ ( self : Optional[int])-> str: '''simple docstring''' self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
217
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_A = {
    'configuration_owlvit': [
        'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'OwlViTConfig',
        'OwlViTOnnxConfig',
        'OwlViTTextConfig',
        'OwlViTVisionConfig',
    ],
    'processing_owlvit': ['OwlViTProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = ['OwlViTFeatureExtractor']
    _A = ['OwlViTImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _A = [
        'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'OwlViTModel',
        'OwlViTPreTrainedModel',
        'OwlViTTextModel',
        'OwlViTVisionModel',
        'OwlViTForObjectDetection',
    ]


if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    _A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
352
import warnings

from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor


_A = logging.get_logger(__name__)


class UpperCAmelCase__ ( A_ ):
    """simple docstring"""

    def __init__( self , *A_ , **A_ ) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.' , A_ , )
        super().__init__(*A_ , **A_ )
117
0
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
__snake_case : int =logging.get_logger(__name__)


def lowerCAmelCase__ ( lowerCamelCase_ : Any ,lowerCamelCase_ : Optional[int]=False ,lowerCamelCase_ : Union[str, Any]=False):
    '''simple docstring'''
    lowerCAmelCase__ : Optional[int] = '''backbone.''' if is_semantic else ''''''
    lowerCAmelCase__ : List[str] = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias"""))
        rename_keys.append(
            (f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight"""))
        rename_keys.append(
            (f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias"""))
        rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight"""))
        rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias"""))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"""{prefix}cls_token""", '''beit.embeddings.cls_token'''),
            (f"""{prefix}patch_embed.proj.weight""", '''beit.embeddings.patch_embeddings.projection.weight'''),
            (f"""{prefix}patch_embed.proj.bias""", '''beit.embeddings.patch_embeddings.projection.bias'''),
            (f"""{prefix}pos_embed""", '''beit.embeddings.position_embeddings'''),
        ])

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ('''mask_token''', '''beit.embeddings.mask_token'''),
                ('''norm.weight''', '''layernorm.weight'''),
                ('''norm.bias''', '''layernorm.bias'''),
            ])
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ('''fc_norm.weight''', '''beit.pooler.layernorm.weight'''),
                ('''fc_norm.bias''', '''beit.pooler.layernorm.bias'''),
                ('''head.weight''', '''classifier.weight'''),
                ('''head.bias''', '''classifier.bias'''),
            ])

    return rename_keys


def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : Union[str, Any] ,lowerCamelCase_ : Optional[Any]=False ,lowerCamelCase_ : Tuple=False):
    '''simple docstring'''
    for i in range(config.num_hidden_layers):
        lowerCAmelCase__ : Union[str, Any] = '''backbone.''' if is_semantic else ''''''
        # queries, keys and values
        lowerCAmelCase__ : Dict = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""")
        lowerCAmelCase__ : str = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""")
        lowerCAmelCase__ : Optional[int] = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""")

        lowerCAmelCase__ : Any = in_proj_weight[
            : config.hidden_size, :
        ]
        lowerCAmelCase__ : Any = q_bias
        lowerCAmelCase__ : str = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        lowerCAmelCase__ : str = in_proj_weight[
            -config.hidden_size :, :
        ]
        lowerCAmelCase__ : int = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        lowerCAmelCase__ : Dict = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""")
        lowerCAmelCase__ : List[str] = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""")
        lowerCAmelCase__ : Optional[Any] = gamma_a
        lowerCAmelCase__ : Optional[Any] = gamma_a


def lowerCAmelCase__ ( lowerCamelCase_ : Dict ,lowerCamelCase_ : int ,lowerCamelCase_ : Union[str, Any]):
    '''simple docstring'''
    lowerCAmelCase__ : Union[str, Any] = dct.pop(lowerCamelCase_)
    lowerCAmelCase__ : str = val


def lowerCAmelCase__ ( ):
    '''simple docstring'''
    lowerCAmelCase__ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    lowerCAmelCase__ : Dict = Image.open(requests.get(lowerCamelCase_ ,stream=lowerCamelCase_).raw)
    return im


@torch.no_grad()
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : int ,lowerCamelCase_ : Tuple=False):
    '''simple docstring'''
    lowerCAmelCase__ : Union[str, Any] = False if '''rvlcdip''' in checkpoint_url else True
    lowerCAmelCase__ : List[str] = BeitConfig(use_absolute_position_embeddings=lowerCamelCase_ ,use_mask_token=lowerCamelCase_)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        lowerCAmelCase__ : Union[str, Any] = 1024
        lowerCAmelCase__ : List[str] = 4096
        lowerCAmelCase__ : Union[str, Any] = 24
        lowerCAmelCase__ : List[str] = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        lowerCAmelCase__ : Optional[int] = 16
        lowerCAmelCase__ : Optional[Any] = '''huggingface/label-files'''
        lowerCAmelCase__ : str = '''rvlcdip-id2label.json'''
        lowerCAmelCase__ : Union[str, Any] = json.load(open(hf_hub_download(lowerCamelCase_ ,lowerCamelCase_ ,repo_type='''dataset''') ,'''r'''))
        lowerCAmelCase__ : Optional[Any] = {int(lowerCamelCase_): v for k, v in idalabel.items()}
        lowerCAmelCase__ : int = idalabel
        lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}

    # load state_dict of original model, remove and rename some keys
    lowerCAmelCase__ : List[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase_ ,map_location='''cpu''')['''model''']

    lowerCAmelCase__ : str = create_rename_keys(lowerCamelCase_ ,has_lm_head=lowerCamelCase_)
    for src, dest in rename_keys:
        rename_key(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_)
    read_in_q_k_v(lowerCamelCase_ ,lowerCamelCase_ ,has_lm_head=lowerCamelCase_)

    # load HuggingFace model
    lowerCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling(lowerCamelCase_) if has_lm_head else BeitForImageClassification(lowerCamelCase_)
    model.eval()
    model.load_state_dict(lowerCamelCase_)

    # Check outputs on an image
    lowerCAmelCase__ : Tuple = BeitImageProcessor(
        size=config.image_size ,resample=PILImageResampling.BILINEAR ,do_center_crop=lowerCamelCase_)
    lowerCAmelCase__ : str = prepare_img()

    lowerCAmelCase__ : List[Any] = image_processor(images=lowerCamelCase_ ,return_tensors='''pt''')
    lowerCAmelCase__ : Tuple = encoding['''pixel_values''']

    lowerCAmelCase__ : str = model(lowerCamelCase_)
    lowerCAmelCase__ : List[Any] = outputs.logits

    # verify logits
    lowerCAmelCase__ : Dict = [1, 16] if '''rvlcdip''' in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(lowerCamelCase_), "Shape of logits not as expected"

    Path(lowerCamelCase_).mkdir(exist_ok=lowerCamelCase_)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(lowerCamelCase_)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(lowerCamelCase_)

    if push_to_hub:
        if has_lm_head:
            lowerCAmelCase__ : str = '''dit-base''' if '''base''' in checkpoint_url else '''dit-large'''
        else:
            lowerCAmelCase__ : Optional[int] = '''dit-base-finetuned-rvlcdip''' if '''dit-b''' in checkpoint_url else '''dit-large-finetuned-rvlcdip'''
        image_processor.push_to_hub(
            repo_path_or_name=Path(lowerCamelCase_ ,lowerCamelCase_) ,organization='''nielsr''' ,commit_message='''Add image processor''' ,use_temp_dir=lowerCamelCase_ ,)
        model.push_to_hub(
            repo_path_or_name=Path(lowerCamelCase_ ,lowerCamelCase_) ,organization='''nielsr''' ,commit_message='''Add model''' ,use_temp_dir=lowerCamelCase_ ,)


if __name__ == "__main__":
    __snake_case : Union[str, Any] =argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_url',
        default='https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth',
        type=str,
        help='URL to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
    )

    __snake_case : List[Any] =parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
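# The q/k/v handling in read_in_q_k_v above slices a fused attention projection
# of shape (3 * hidden_size, hidden_size) into separate query/key/value blocks.
# A short sketch of that slicing with a stand-in hidden size (real DiT/BEiT
# checkpoints use 768 or 1024):
import torch

hidden_size = 4  # stand-in value for illustration
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # fused qkv weight

query = in_proj_weight[:hidden_size, :]
key = in_proj_weight[hidden_size : hidden_size * 2, :]
value = in_proj_weight[-hidden_size:, :]
assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)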
129
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def __UpperCAmelCase ( A : List[str] , A : Any , A : Optional[int] , A : Optional[int] ) -> Optional[Any]:
    if isinstance(A , A ):
        UpperCAmelCase_ : Any = np.full((len(A ), sequence_length, 2) , A )
    else:
        UpperCAmelCase_ : int = np.full((len(A ), sequence_length) , A )

    for i, tensor in enumerate(A ):
        if padding_side == "right":
            if isinstance(A , A ):
                UpperCAmelCase_ : Tuple = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : Dict = tensor[:sequence_length]
        else:
            if isinstance(A , A ):
                UpperCAmelCase_ : Optional[Any] = tensor[:sequence_length]
            else:
                UpperCAmelCase_ : int = tensor[:sequence_length]

    return out_tensor.tolist()


def __UpperCAmelCase ( A : List[Any] ) -> str:
    UpperCAmelCase_ : Dict = ord(A )
    if (cp >= 3_3 and cp <= 4_7) or (cp >= 5_8 and cp <= 6_4) or (cp >= 9_1 and cp <= 9_6) or (cp >= 1_2_3 and cp <= 1_2_6):
        return True
    UpperCAmelCase_ : Union[str, Any] = unicodedata.category(A )
    if cat.startswith('''P''' ):
        return True
    return False


@dataclass
class snake_case__ ( UpperCamelCase):
    a_ = 42
    a_ = True
    a_ = None
    a_ = None
    a_ = -100
    a_ = "pt"

    def A ( self : List[Any] , _A : Dict ) -> Tuple:
        import torch

        UpperCAmelCase_ : Dict = '''label''' if '''label''' in features[0].keys() else '''labels'''
        UpperCAmelCase_ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        UpperCAmelCase_ : Tuple = self.tokenizer.pad(
            _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' if labels is None else None , )
        if labels is None:
            return batch

        UpperCAmelCase_ : Any = torch.tensor(batch['''entity_ids'''] ).shape[1]
        UpperCAmelCase_ : Union[str, Any] = self.tokenizer.padding_side
        if padding_side == "right":
            UpperCAmelCase_ : Optional[Any] = [
                list(_A ) + [self.label_pad_token_id] * (sequence_length - len(_A )) for label in labels
            ]
        else:
            UpperCAmelCase_ : Any = [
                [self.label_pad_token_id] * (sequence_length - len(_A )) + list(_A ) for label in labels
            ]

        UpperCAmelCase_ : Union[str, Any] = [feature['''ner_tags'''] for feature in features]
        UpperCAmelCase_ : Union[str, Any] = padding_tensor(_A , -1 , _A , _A )
        UpperCAmelCase_ : List[str] = [feature['''original_entity_spans'''] for feature in features]
        UpperCAmelCase_ : int = padding_tensor(_A , (-1, -1) , _A , _A )
        UpperCAmelCase_ : Union[str, Any] = {k: torch.tensor(_A , dtype=torch.intaa ) for k, v in batch.items()}

        return batch
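# padding_tensor above right- or left-pads each per-example list (e.g. ner_tags)
# out to sequence_length with a fill value. A simplified re-statement of the
# right-padding case on toy data, for illustration only:
import numpy as np

features = [[7, 8], [5]]  # toy per-example ner_tags
sequence_length, padding_value = 4, -1

out = np.full((len(features), sequence_length), padding_value)
for i, tags in enumerate(features):
    out[i, : len(tags)] = tags[:sequence_length]
print(out.tolist())  # [[7, 8, -1, -1], [5, -1, -1, -1]]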
304
0
"""simple docstring""" import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class __lowerCamelCase : def __init__(self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=64 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ): '''simple docstring''' _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = seq_length _lowerCAmelCase = is_training _lowerCAmelCase = use_input_mask _lowerCAmelCase = use_token_type_ids _lowerCAmelCase = use_labels _lowerCAmelCase = vocab_size _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = max_position_embeddings _lowerCAmelCase = type_vocab_size _lowerCAmelCase = type_sequence_label_size _lowerCAmelCase = initializer_range _lowerCAmelCase = num_labels _lowerCAmelCase = num_choices _lowerCAmelCase = scope _lowerCAmelCase = vocab_size - 1 def A__ (self ): '''simple docstring''' _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _lowerCAmelCase = None if self.use_input_mask: _lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _lowerCAmelCase = None if self.use_labels: _lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _lowerCAmelCase = self.get_config() return config, input_ids, input_mask, token_labels def A__ (self ): '''simple docstring''' return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase = True return config, input_ids, input_mask, token_labels def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = GPTNeoXModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase ) _lowerCAmelCase = model(lowerCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = True _lowerCAmelCase = GPTNeoXModel(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = GPTNeoXForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = GPTNeoXForQuestionAnswering(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = GPTNeoXForSequenceClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = self.num_labels _lowerCAmelCase = GPTNeoXForTokenClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A__ (self , lowerCamelCase , lowerCamelCase , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase = True _lowerCAmelCase = GPTNeoXForCausalLM(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() # first forward pass _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase ) _lowerCAmelCase = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids _lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and _lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) _lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 ) _lowerCAmelCase = model(lowerCamelCase , attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase ) _lowerCAmelCase = output_from_no_past["""hidden_states"""][0] _lowerCAmelCase = model( lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["""hidden_states"""][0] # select random slice _lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() _lowerCAmelCase = 
output_from_no_past[:, -3:, random_slice_idx].detach() _lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-3 ) ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs _lowerCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCamelCase ( __lowercase , __lowercase , __lowercase , unittest.TestCase ): __UpperCamelCase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) __UpperCamelCase = (GPTNeoXForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def A__ (self ): '''simple docstring''' _lowerCAmelCase = GPTNeoXModelTester(self ) _lowerCAmelCase = ConfigTester(self , config_class=lowerCamelCase , hidden_size=64 , num_attention_heads=8 ) def A__ (self ): '''simple docstring''' self.config_tester.run_common_tests() def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_decoder() _lowerCAmelCase = None self.model_tester.create_and_check_model_as_decoder(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCamelCase , lowerCamelCase , lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase ) def A__ (self ): '''simple docstring''' _lowerCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase ) @unittest.skip(reason="""Feed forward 
chunking is not implemented""" ) def A__ (self ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def A__ (self , lowerCamelCase ): '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size ) _lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase = GPTNeoXModel(lowerCamelCase ) original_model.to(lowerCamelCase ) original_model.eval() _lowerCAmelCase = original_model(lowerCamelCase ).last_hidden_state _lowerCAmelCase = original_model(lowerCamelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights _lowerCAmelCase = {"""type""": scaling_type, """factor""": 10.0} _lowerCAmelCase = GPTNeoXModel(lowerCamelCase ) scaled_model.to(lowerCamelCase ) scaled_model.eval() _lowerCAmelCase = scaled_model(lowerCamelCase ).last_hidden_state _lowerCAmelCase = scaled_model(lowerCamelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1e-5 ) ) @require_torch class __lowerCamelCase ( unittest.TestCase ): @slow def A__ (self ): '''simple docstring''' _lowerCAmelCase = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: _lowerCAmelCase = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(lowerCamelCase ) _lowerCAmelCase = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(lowerCamelCase ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 _lowerCAmelCase = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" _lowerCAmelCase = model.generate(**lowerCamelCase , do_sample=lowerCamelCase , max_new_tokens=20 ) _lowerCAmelCase = tokenizer.batch_decode(lowerCamelCase )[0] self.assertEqual(lowerCamelCase , lowerCamelCase )
317
"""simple docstring""" def __UpperCAmelCase ( snake_case_ : int = 1000000 ) -> int: """simple docstring""" _lowerCAmelCase = limit + 1 _lowerCAmelCase = [0] * limit for first_term in range(1 , snake_case_ ): for n in range(snake_case_ , snake_case_ , snake_case_ ): _lowerCAmelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _lowerCAmelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F'{solution() = }')
317
1
def _UpperCAmelCase ( snake_case ):
    """simple docstring"""
    if not isinstance(snake_case , snake_case ):
        _lowerCAmelCase = F'Input value of [number={number}] must be an integer'
        raise TypeError(snake_case )
    if number < 0:
        return False
    _lowerCAmelCase = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
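# The digit-by-digit comparison above is the classic automorphic-number test:
# n is accepted when n**2 ends with the decimal digits of n (5 -> 25,
# 76 -> 5776, ...). A plainly named re-statement, purely for illustration:
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

print([n for n in range(1, 100) if is_automorphic(n)])  # [1, 5, 6, 25, 76]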
82
from math import pi, sqrt, tan


def surface_area_cube(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    """simple docstring"""
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    """simple docstring"""
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError('surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    """simple docstring"""
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    """simple docstring"""
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError('surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    """simple docstring"""
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    """simple docstring"""
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """simple docstring"""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    """simple docstring"""
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base1: float, base2: float, height: float) -> float:
    """simple docstring"""
    if base1 < 0 or base2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base1 + base2) * height


def area_circle(radius: float) -> float:
    """simple docstring"""
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    """simple docstring"""
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    """simple docstring"""
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    """simple docstring"""
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides'
        )
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as \
length of a side'
        )
    return (sides * length**2) / (4 * tan(pi / sides))


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)  # verbose so we can see methods missing tests

    print("[DEMO] Areas of various geometric shapes: \n")
    print(f"Rectangle: {area_rectangle(10, 20) = }")
    print(f"Square: {area_square(10) = }")
    print(f"Triangle: {area_triangle(10, 10) = }")
    print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
    print(f"Parallelogram: {area_parallelogram(10, 20) = }")
    print(f"Rhombus: {area_rhombus(10, 20) = }")
    print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
    print(f"Circle: {area_circle(20) = }")
    print(f"Ellipse: {area_ellipse(10, 20) = }")
    print("\nSurface Areas of various geometric shapes: \n")
    print(f"Cube: {surface_area_cube(20) = }")
    print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
    print(f"Sphere: {surface_area_sphere(20) = }")
    print(f"Hemisphere: {surface_area_hemisphere(20) = }")
    print(f"Cone: {surface_area_cone(10, 20) = }")
    print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
    print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
    print(f"Torus: {surface_area_torus(20, 10) = }")
    print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
    print(f"Square: {area_reg_polygon(4, 10) = }")
    print(f"Regular Pentagon: {area_reg_polygon(5, 10) = }")
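A quick sanity check for the module above; this is a minimal sketch that assumes the function names restored from the ValueError messages, and the expected values follow directly from the formulas:

# Hypothetical smoke test for the area helpers (names assumed as restored above).
from math import isclose, pi

assert area_rectangle(10, 20) == 200                  # length * width
assert area_triangle_three_sides(5, 12, 13) == 30.0   # Heron's formula on a right triangle
assert isclose(surface_area_sphere(1), 4 * pi)        # 4 * pi * r**2 with r = 1
assert isclose(area_reg_polygon(4, 10), 100)          # a regular 4-gon is a 10x10 square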
90
0
"""simple docstring""" from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''Salesforce/blip-image-captioning-base''' SCREAMING_SNAKE_CASE_ =( '''This is a tool that generates a description of an image. It takes an input named `image` which should be the ''' '''image to caption, and returns a text that contains the description in English.''' ) SCREAMING_SNAKE_CASE_ ='''image_captioner''' SCREAMING_SNAKE_CASE_ =AutoModelForVisionaSeq SCREAMING_SNAKE_CASE_ =['''image'''] SCREAMING_SNAKE_CASE_ =['''text'''] def __init__( self : str , *snake_case__ : Dict , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(self , ["vision"] ) super().__init__(*snake_case__ , **snake_case__ ) def __a ( self : Tuple , snake_case__ : "Image" ): '''simple docstring''' return self.pre_processor(images=snake_case__ , return_tensors="pt" ) def __a ( self : List[Any] , snake_case__ : Optional[int] ): '''simple docstring''' return self.model.generate(**snake_case__ ) def __a ( self : str , snake_case__ : str ): '''simple docstring''' return self.pre_processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )[0].strip()
351
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
298
0
def power(base: int, exponent: int) -> float:
    """simple docstring"""
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
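As a usage sketch (assuming the restored name power), note that the recursion unwinds once per unit of the exponent, and negative exponents are handled by the caller, exactly as the __main__ block does:

# power() recurses `exponent` times, so call depth is O(exponent).
assert power(2, 10) == 1024
assert power(5, 0) == 1            # base case: anything to the 0 is 1
assert 1 / power(2, 3) == 0.125    # how __main__ treats exponent = -3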
340
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class A (unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : List[str]=13 , __lowerCAmelCase : int=7 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : int=True , __lowerCAmelCase : Dict=True , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : List[Any]=99 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[Any]=5 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : Any=37 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : List[str]=5_12 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : List[Any]=0.0_2 , __lowerCAmelCase : Tuple=4 , ) -> Dict: """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_attention_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_choices def a_ ( self : Any ) -> str: """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_attention_mask: A__ = random_attention_mask([self.batch_size, self.seq_length] ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a_ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ , A__ = config_and_inputs A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class A (SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' __lowerCamelCase : str = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def a_ ( self : str ) -> Optional[int]: """simple docstring""" A__ = FlaxAlbertModelTester(self ) @slow def a_ ( self : int ) -> Tuple: """simple docstring""" for 
model_class_name in self.all_model_classes: A__ = model_class_name.from_pretrained("""albert-base-v2""" ) A__ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCAmelCase ) @require_flax class A (unittest.TestCase ): '''simple docstring''' @slow def a_ ( self : Dict ) -> List[Any]: """simple docstring""" A__ = FlaxAlbertModel.from_pretrained("""albert-base-v2""" ) A__ = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) A__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) A__ = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase )[0] A__ = (1, 11, 7_68) self.assertEqual(output.shape , __lowerCAmelCase ) A__ = np.array( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __lowerCAmelCase , atol=1e-4 ) )
274
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase : str = { '''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''], '''tokenization_convbert''': ['''ConvBertTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = ['''ConvBertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Any = [ '''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ConvBertForMaskedLM''', '''ConvBertForMultipleChoice''', '''ConvBertForQuestionAnswering''', '''ConvBertForSequenceClassification''', '''ConvBertForTokenClassification''', '''ConvBertLayer''', '''ConvBertModel''', '''ConvBertPreTrainedModel''', '''load_tf_weights_in_convbert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : List[Any] = [ '''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFConvBertForMaskedLM''', '''TFConvBertForMultipleChoice''', '''TFConvBertForQuestionAnswering''', '''TFConvBertForSequenceClassification''', '''TFConvBertForTokenClassification''', '''TFConvBertLayer''', '''TFConvBertModel''', '''TFConvBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
340
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : int = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class A_ ( _a ): lowerCAmelCase__ = 'mobilenet_v1' def __init__( self: Tuple ,__lowerCAmelCase: int=3 ,__lowerCAmelCase: Dict=224 ,__lowerCAmelCase: int=1.0 ,__lowerCAmelCase: Tuple=8 ,__lowerCAmelCase: List[str]="relu6" ,__lowerCAmelCase: int=True ,__lowerCAmelCase: List[Any]=0.9_99 ,__lowerCAmelCase: Optional[int]=0.02 ,__lowerCAmelCase: Optional[int]=0.0_01 ,**__lowerCAmelCase: str ,): '''simple docstring''' super().__init__(**__lowerCAmelCase ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) _lowerCamelCase : List[str] = num_channels _lowerCamelCase : Union[str, Any] = image_size _lowerCamelCase : List[Any] = depth_multiplier _lowerCamelCase : Any = min_depth _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : Dict = tf_padding _lowerCamelCase : Union[str, Any] = classifier_dropout_prob _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : List[Any] = layer_norm_eps class A_ ( _a ): lowerCAmelCase__ = version.parse('1.11' ) @property def _lowercase ( self: Optional[int] ): '''simple docstring''' return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _lowercase ( self: Optional[Any] ): '''simple docstring''' if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _lowercase ( self: Any ): '''simple docstring''' return 1e-4
340
1
import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() __lowerCAmelCase : Dict = logging.get_logger('transformers.models.speecht5') def a__ ( A_, A_, A_ ): '''simple docstring''' hf_model.apply_weight_norm() __magic_name__ = checkpoint["""input_conv.weight_g"""] __magic_name__ = checkpoint["""input_conv.weight_v"""] __magic_name__ = checkpoint["""input_conv.bias"""] for i in range(len(config.upsample_rates ) ): __magic_name__ = checkpoint[f'''upsamples.{i}.1.weight_g'''] __magic_name__ = checkpoint[f'''upsamples.{i}.1.weight_v'''] __magic_name__ = checkpoint[f'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): __magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v'''] __magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias'''] __magic_name__ = checkpoint["""output_conv.1.weight_g"""] __magic_name__ = checkpoint["""output_conv.1.weight_v"""] __magic_name__ = checkpoint["""output_conv.1.bias"""] hf_model.remove_weight_norm() @torch.no_grad() def a__ ( A_, A_, A_, A_=None, A_=None, ): '''simple docstring''' if config_path is not None: __magic_name__ = SpeechTaHifiGanConfig.from_pretrained(A_ ) else: __magic_name__ = SpeechTaHifiGanConfig() __magic_name__ = SpeechTaHifiGan(A_ ) __magic_name__ = torch.load(A_ ) load_weights(orig_checkpoint["""model"""]["""generator"""], A_, A_ ) __magic_name__ = np.load(A_ ) __magic_name__ = stats[0].reshape(-1 ) __magic_name__ = stats[1].reshape(-1 ) __magic_name__ = torch.from_numpy(A_ ).float() __magic_name__ = torch.from_numpy(A_ ).float() model.save_pretrained(A_ ) if repo_id: print("""Pushing to the hub...""" ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint') parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) __lowerCAmelCase : List[Any] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
88
def is_arithmetic_series(series: list) -> bool:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
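A short usage sketch for the two helpers (the restored names is_arithmetic_series and arithmetic_mean are an assumption based on what the functions compute):

assert is_arithmetic_series([2, 4, 6]) is True     # common difference 2
assert is_arithmetic_series([2, 4, 7]) is False    # difference 2 then 3: not constant
assert arithmetic_mean([2, 4, 6]) == 4.0           # sum / length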
88
1
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
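The function computes the Liouville function lambda(n) = (-1) ** Omega(n), where Omega counts prime factors with multiplicity; a brief check (the name liouville_lambda is inferred from the behaviour, not stated in the source):

assert liouville_lambda(10) == 1     # 10 = 2 * 5, Omega = 2 (even)
assert liouville_lambda(11) == -1    # prime, Omega = 1 (odd)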
276
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """simple docstring"""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
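To see what check_partition_perfect tests: a candidate k passes exactly when sqrt(4*k + 1) is one less than a power of two, since then sqrt(4k + 1)/2 + 1/2 is itself a power of two and its log2 is integral. A one-line check under that reading:

assert check_partition_perfect(2)       # 4*2 + 1 = 9, sqrt = 3 = 2**2 - 1
assert not check_partition_perfect(4)   # sqrt(17) is not of that form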
276
1
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """simple docstring"""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
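A hypothetical invocation of the converter above (the script filename, paths, and model name are placeholders, not taken from the source):

# python convert_bert_pytorch_checkpoint_to_original_tf.py \
#     --model_name bert-base-uncased \
#     --pytorch_model_path ./pytorch_model.bin \
#     --tf_cache_dir ./tf_ckpt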
39
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { '''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''', } class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): '''simple docstring''' __lowercase : Optional[Any] = '''focalnet''' def __init__( self ,__UpperCAmelCase=224 ,__UpperCAmelCase=4 ,__UpperCAmelCase=3 ,__UpperCAmelCase=96 ,__UpperCAmelCase=False ,__UpperCAmelCase=[192, 384, 768, 768] ,__UpperCAmelCase=[2, 2, 6, 2] ,__UpperCAmelCase=[2, 2, 2, 2] ,__UpperCAmelCase=[3, 3, 3, 3] ,__UpperCAmelCase="gelu" ,__UpperCAmelCase=4.0 ,__UpperCAmelCase=0.0 ,__UpperCAmelCase=0.1 ,__UpperCAmelCase=False ,__UpperCAmelCase=1E-4 ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=False ,__UpperCAmelCase=0.0_2 ,__UpperCAmelCase=1E-5 ,__UpperCAmelCase=32 ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,**__UpperCAmelCase ,) -> Optional[Any]: super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Dict = image_size lowerCAmelCase__ : int = patch_size lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : List[str] = use_conv_embed lowerCAmelCase__ : List[Any] = hidden_sizes lowerCAmelCase__ : Dict = depths lowerCAmelCase__ : List[str] = focal_levels lowerCAmelCase__ : List[str] = focal_windows lowerCAmelCase__ : Dict = hidden_act lowerCAmelCase__ : Dict = mlp_ratio lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Tuple = drop_path_rate lowerCAmelCase__ : Dict = use_layerscale lowerCAmelCase__ : Optional[Any] = layerscale_value lowerCAmelCase__ : str = use_post_layernorm lowerCAmelCase__ : Union[str, Any] = use_post_layernorm_in_modulation lowerCAmelCase__ : int = normalize_modulator lowerCAmelCase__ : Optional[Any] = initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : List[Any] = encoder_stride lowerCAmelCase__ : Dict = ["""stem"""] + [F"""stage{idx}""" for idx in range(1 ,len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Any = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase ,out_indices=__UpperCAmelCase ,stage_names=self.stage_names )
37
0
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph: dict) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
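The DFS two-colors every connected component; an odd cycle forces two adjacent vertices into the same color, so the function returns False exactly on non-bipartite graphs. A small sketch using the restored name:

triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}            # odd cycle: not bipartite
square = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}   # even cycle: bipartite
assert check_bipartite_dfs(triangle) is False
assert check_bipartite_dfs(square) is True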
101
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class A__ ( _snake_case ): def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = SMALL_MODEL_IDENTIFIER A_ = """pt""" A_ = """tf""" def snake_case_ ( self , UpperCamelCase__ ) -> Tuple: '''simple docstring''' A_ = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ ) -> List[str]: '''simple docstring''' A_ = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase__ ) model_tf.save_pretrained(UpperCamelCase__ ) def snake_case_ ( self ) -> Dict: '''simple docstring''' A_ = """mock_framework""" # Framework provided - return whatever the user provides A_ = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase__ ) A_ = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase__ ) A_ = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def snake_case_ ( self ) -> int: '''simple docstring''' # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase__ ) A_ = FeaturesManager.determine_framework(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase__ ) A_ = FeaturesManager.determine_framework(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(UpperCamelCase__ ): A_ = FeaturesManager.determine_framework(UpperCamelCase__ ) def snake_case_ ( self ) -> List[Any]: '''simple docstring''' A_ = MagicMock(return_value=UpperCamelCase__ ) with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ): A_ = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow A_ = MagicMock(return_value=UpperCamelCase__ ) with patch("""transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ): A_ = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase__ , self.framework_tf ) # Both in environment -> use PyTorch A_ = MagicMock(return_value=UpperCamelCase__ ) A_ = MagicMock(return_value=UpperCamelCase__ ) with patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ), patch( """transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ): A_ = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase__ , self.framework_pt ) # Both not in environment -> raise error A_ = MagicMock(return_value=UpperCamelCase__ ) A_ = MagicMock(return_value=UpperCamelCase__ ) with 
patch("""transformers.onnx.features.is_tf_available""" , UpperCamelCase__ ), patch( """transformers.onnx.features.is_torch_available""" , UpperCamelCase__ ): with self.assertRaises(UpperCamelCase__ ): A_ = FeaturesManager.determine_framework(self.test_model )
101
1
"""simple docstring""" from scipy.stats import spearmanr import datasets lowercase__ : Optional[int] = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowercase__ : Union[str, Any] = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowercase__ : Tuple = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class UpperCamelCase__ ( datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self : str ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float' ), 'references': datasets.Value('float' ), } ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=False ): lowerCAmelCase_ : List[Any] = spearmanr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
224
"""simple docstring""" import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowercase__ : List[str] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> Optional[Any]: """simple docstring""" lowerCAmelCase_ : List[Any] = test_results.split(' ' ) lowerCAmelCase_ : Tuple = 0 lowerCAmelCase_ : Any = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. lowerCAmelCase_ : Any = expressions[-2] if '=' in expressions[-1] else expressions[-1] for i, expression in enumerate(lowerCAmelCase__ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def UpperCamelCase_ ( lowerCAmelCase__ : Dict ) -> str: """simple docstring""" lowerCAmelCase_ : Optional[Any] = {} lowerCAmelCase_ : List[str] = None lowerCAmelCase_ : List[str] = False for line in failures_short_lines.split('\n' ): if re.search(R'_ \[doctest\]' , lowerCAmelCase__ ): lowerCAmelCase_ : Optional[Any] = True lowerCAmelCase_ : Union[str, Any] = line.split(' ' )[2] elif in_error and not line.split(' ' )[0].isdigit(): lowerCAmelCase_ : Union[str, Any] = line lowerCAmelCase_ : int = False return failures class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict ): lowerCAmelCase_ : str = title lowerCAmelCase_ : Optional[int] = doc_test_results['time_spent'].split(',' )[0] lowerCAmelCase_ : int = doc_test_results['success'] lowerCAmelCase_ : Dict = doc_test_results['failures'] lowerCAmelCase_ : Optional[int] = self.n_success + self.n_failures # Failures and success of the modeling tests lowerCAmelCase_ : Any = doc_test_results @property def SCREAMING_SNAKE_CASE__ ( self : List[str] ): lowerCAmelCase_ : int = [self._time_spent] lowerCAmelCase_ : Any = 0 for time in time_spent: lowerCAmelCase_ : Optional[Any] = time.split(':' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(SCREAMING_SNAKE_CASE_ ) == 1: lowerCAmelCase_ : Any = [0, 0, time_parts[0]] lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : List[str] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3_6_0_0 + minutes * 6_0 + seconds lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = total_secs // 3_6_0_0, (total_secs % 3_6_0_0) // 6_0, total_secs % 6_0 return F"{int(SCREAMING_SNAKE_CASE_ )}h{int(SCREAMING_SNAKE_CASE_ )}m{int(SCREAMING_SNAKE_CASE_ )}s" @property def SCREAMING_SNAKE_CASE__ ( self : int ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def SCREAMING_SNAKE_CASE__ ( self : str ): return { "type": "section", "text": { "type": "plain_text", "text": F"🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): return { "type": "section", "text": { "type": "plain_text", "text": ( F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" F" {self.time}." ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def SCREAMING_SNAKE_CASE__ ( self : Tuple ): lowerCAmelCase_ : int = 4_0 lowerCAmelCase_ : List[Any] = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )} lowerCAmelCase_ : List[str] = '' for category, failures in category_failures.items(): if len(SCREAMING_SNAKE_CASE_ ) == 0: continue if report != "": report += "\n\n" report += F"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(SCREAMING_SNAKE_CASE_ ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F"The following examples had failures:\n\n\n{report}\n", }, } @property def SCREAMING_SNAKE_CASE__ ( self : int ): lowerCAmelCase_ : int = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(SCREAMING_SNAKE_CASE_ ) @staticmethod def SCREAMING_SNAKE_CASE__ ( ): lowerCAmelCase_ : Tuple = [ { 'type': 'section', 'text': { 'type': 'plain_text', 'text': 'There was an issue running the tests.', }, 'accessory': { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(SCREAMING_SNAKE_CASE_ )} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=SCREAMING_SNAKE_CASE_ , ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(self.payload )} ) ) lowerCAmelCase_ : Any = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.' 
lowerCAmelCase_ : str = client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=SCREAMING_SNAKE_CASE_ , ) def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ): lowerCAmelCase_ : List[Any] = '' for key, value in failures.items(): lowerCAmelCase_ : List[Any] = value[:2_0_0] + ' [Truncated]' if len(SCREAMING_SNAKE_CASE_ ) > 2_5_0 else value failures_text += F"*{key}*\n_{value}_\n\n" lowerCAmelCase_ : int = job_name lowerCAmelCase_ : Dict = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}} if job_link is not None: lowerCAmelCase_ : str = { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): if self.thread_ts is None: raise ValueError('Can only post reply if a post has been made.' ) lowerCAmelCase_ : Dict = self.doc_test_results.pop('job_link' ) self.doc_test_results.pop('failures' ) self.doc_test_results.pop('success' ) self.doc_test_results.pop('time_spent' ) lowerCAmelCase_ : Dict = sorted(self.doc_test_results.items() , key=lambda SCREAMING_SNAKE_CASE_ : t[0] ) for job, job_result in sorted_dict: if len(job_result['failures'] ): lowerCAmelCase_ : Tuple = F"*Num failures* :{len(job_result['failed'] )} \n" lowerCAmelCase_ : List[str] = job_result['failures'] lowerCAmelCase_ : Union[str, Any] = self.get_reply_blocks(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , text=SCREAMING_SNAKE_CASE_ ) print('Sending the following reply' ) print(json.dumps({'blocks': blocks} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=F"Results for {job}" , blocks=SCREAMING_SNAKE_CASE_ , thread_ts=self.thread_ts['ts'] , ) time.sleep(1 ) def UpperCamelCase_ ( ) -> str: """simple docstring""" lowerCAmelCase_ : Union[str, Any] = os.environ['GITHUB_RUN_ID'] lowerCAmelCase_ : int = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" lowerCAmelCase_ : str = requests.get(lowerCAmelCase__ ).json() lowerCAmelCase_ : List[str] = {} try: jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) lowerCAmelCase_ : Optional[Any] = math.ceil((result['total_count'] - 100) / 100 ) for i in range(lowerCAmelCase__ ): lowerCAmelCase_ : int = requests.get(url + f"&page={i + 2}" ).json() jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) return jobs except Exception as e: print('Unknown error, could not fetch links.' , lowerCAmelCase__ ) return {} def UpperCamelCase_ ( lowerCAmelCase__ : str ) -> List[Any]: """simple docstring""" lowerCAmelCase_ : int = {} if os.path.exists(lowerCAmelCase__ ): lowerCAmelCase_ : Tuple = os.listdir(lowerCAmelCase__ ) for file in files: try: with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , encoding='utf-8' ) as f: lowerCAmelCase_ : List[str] = f.read() except UnicodeDecodeError as e: raise ValueError(f"Could not open {os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )}." 
) from e return _artifact def UpperCamelCase_ ( ) -> Dict: """simple docstring""" class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str ): lowerCAmelCase_ : List[Any] = name lowerCAmelCase_ : Tuple = [] def __str__( self : Optional[Any] ): return self.name def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str ): self.paths.append({'name': self.name, 'path': path} ) lowerCAmelCase_ : Dict[str, Artifact] = {} lowerCAmelCase_ : Any = filter(os.path.isdir , os.listdir() ) for directory in directories: lowerCAmelCase_ : int = directory if artifact_name not in _available_artifacts: lowerCAmelCase_ : Optional[Any] = Artifact(lowerCAmelCase__ ) _available_artifacts[artifact_name].add_path(lowerCAmelCase__ ) return _available_artifacts if __name__ == "__main__": lowercase__ : Optional[int] = get_job_links() lowercase__ : Any = retrieve_available_artifacts() lowercase__ : str = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowercase__ : Dict = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job lowercase__ : str = github_actions_job_links.get("""run_doctests""") lowercase__ : int = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] lowercase__ : List[str] = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: lowercase__ , lowercase__ , lowercase__ : str = handle_test_results(artifact["""stats"""]) lowercase__ : Any = failed lowercase__ : str = success lowercase__ : int = time_spent[1:-1] + """, """ lowercase__ : Tuple = extract_first_line_failure(artifact["""failures_short"""]) for line in artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): lowercase__ : List[str] = line.replace("""FAILED """, """""") lowercase__ : Union[str, Any] = line.split()[0].replace("""\n""", """""") if "::" in line: lowercase__ , lowercase__ : Optional[Any] = line.split("""::""") else: lowercase__ , lowercase__ : int = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowercase__ : str = docs[file_regex] doc_test_results[category]["failed"].append(test) lowercase__ : List[Any] = all_failures[test] if test in all_failures else """N/A""" lowercase__ : List[Any] = failure break lowercase__ : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
224
1
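# Illustrative sketch of the stats-line parsing used in the record above: the handle_test_results
# helper counts "N failed" / "N passed" tokens from pytest's summary line and grabs the trailing
# time token. The sample string here is hypothetical, purely to show the counting logic.
def _parse_pytest_stats(test_results: str):
    expressions = test_results.split(" ")
    failed = success = 0
    # When the summary is short, pytest wraps it in "=" signs; account for both forms.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent


assert _parse_pytest_stats("= 3 failed, 97 passed in (120.50s) =") == (3, 97, "(120.50s)")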
"""simple docstring""" import numpy as np def SCREAMING_SNAKE_CASE__ ( snake_case : np.ndarray , snake_case : np.ndarray , snake_case : float = 1E-1_2 , snake_case : int = 100 , )-> tuple[float, np.ndarray]: '''simple docstring''' assert np.shape(snake_case )[0] == np.shape(snake_case )[1] # Ensure proper dimensionality. assert np.shape(snake_case )[0] == np.shape(snake_case )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(snake_case ) == np.iscomplexobj(snake_case ) UpperCAmelCase__ : Dict = np.iscomplexobj(snake_case ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(snake_case , input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. UpperCAmelCase__ : Any = False UpperCAmelCase__ : List[str] = 0 UpperCAmelCase__ : str = 0 UpperCAmelCase__ : int = 1E1_2 while not convergence: # Multiple matrix by the vector. UpperCAmelCase__ : Union[str, Any] = np.dot(snake_case , snake_case ) # Normalize the resulting output vector. UpperCAmelCase__ : Optional[Any] = w / np.linalg.norm(snake_case ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) UpperCAmelCase__ : Union[str, Any] = vector.conj().T if is_complex else vector.T UpperCAmelCase__ : List[Any] = np.dot(snake_case , np.dot(snake_case , snake_case ) ) # Check convergence. UpperCAmelCase__ : Tuple = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: UpperCAmelCase__ : Union[str, Any] = True UpperCAmelCase__ : int = lambda_ if is_complex: UpperCAmelCase__ : List[str] = np.real(lambda_ ) return lambda_, vector def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) UpperCAmelCase__ : Optional[Any] = np.array([41, 4, 20] ) UpperCAmelCase__ : Dict = real_input_matrix.astype(np.complexaaa ) UpperCAmelCase__ : Union[str, Any] = np.triu(1J * complex_input_matrix , 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T UpperCAmelCase__ : Optional[int] = np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": UpperCAmelCase__ : int = real_input_matrix UpperCAmelCase__ : Dict = real_vector elif problem_type == "complex": UpperCAmelCase__ : Optional[Any] = complex_input_matrix UpperCAmelCase__ : Dict = complex_vector # Our implementation. UpperCAmelCase__ , UpperCAmelCase__ : Tuple = power_iteration(snake_case , snake_case ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). UpperCAmelCase__ , UpperCAmelCase__ : str = np.linalg.eigh(snake_case ) # Last eigenvalue is the maximum one. UpperCAmelCase__ : Union[str, Any] = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. UpperCAmelCase__ : List[Any] = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(snake_case ) - np.abs(snake_case ) ) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
298
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
298
1
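# Illustrative usage sketch for the power-iteration record above: repeatedly applying a matrix
# to a unit vector converges to the dominant eigenvector, and the Rayleigh quotient of that
# vector recovers the dominant eigenvalue. For the diagonal matrix [[2, 0], [0, 1]] these are
# known in closed form (eigenvalue 2, eigenvector e1), so the result can be checked exactly.
import numpy as np


def _demo_power_iteration():
    a = np.array([[2.0, 0.0], [0.0, 1.0]])
    v = np.array([1.0, 1.0])
    for _ in range(100):
        w = a @ v
        v = w / np.linalg.norm(w)  # keep the iterate a unit vector
    eigenvalue = v @ a @ v  # Rayleigh quotient of the converged unit vector
    assert abs(eigenvalue - 2.0) < 1e-6


_demo_power_iteration()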
'''simple docstring'''
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


class A__(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
145
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging

if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
        ),
    },
    'tokenizer_file': {
        'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
        'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
        'moussaKam/barthez-orangesum-title': (
            'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'moussaKam/mbarthez': 1024,
    'moussaKam/barthez': 1024,
    'moussaKam/barthez-orangesum-title': 1024,
}

SPIECE_UNDERLINE = '▁'


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        **kwargs,
    ):
        """simple docstring"""
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )

        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
145
1
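# Illustrative sketch of the resize -> center-crop -> rescale -> normalize chain from the
# image-processor record above, reproduced with plain numpy on a synthetic image. The constants
# (256 -> 224 crop, 1/255 rescale, per-channel mean/std of 0.5, i.e. IMAGENET_STANDARD values)
# mirror that record's defaults; the helper name here is hypothetical.
import numpy as np


def _demo_preprocess(image: np.ndarray) -> np.ndarray:
    # center-crop a 256x256 HWC image down to 224x224
    top = (image.shape[0] - 224) // 2
    left = (image.shape[1] - 224) // 2
    image = image[top : top + 224, left : left + 224]
    image = image * (1 / 255)        # rescale pixel values to [0, 1]
    image = (image - 0.5) / 0.5      # normalize with mean=std=0.5 per channel
    return image.transpose(2, 0, 1)  # HWC -> CHW (ChannelDimension.FIRST)


out = _demo_preprocess(np.random.randint(0, 256, (256, 256, 3)).astype(np.float32))
assert out.shape == (3, 224, 224)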
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _lowerCAmelCase ( lowercase ,unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Dict = KandinskyVaaImgaImgPipeline __UpperCAmelCase : Tuple = ["image_embeds", "negative_image_embeds", "image"] __UpperCAmelCase : Tuple = [ "image_embeds", "negative_image_embeds", "image", ] __UpperCAmelCase : Any = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] __UpperCAmelCase : Tuple = False @property def _lowercase ( self : Tuple ): return 3_2 @property def _lowercase ( self : Tuple ): return 3_2 @property def _lowercase ( self : Optional[Any] ): return self.time_input_dim @property def _lowercase ( self : List[Any] ): return self.time_input_dim * 4 @property def _lowercase ( self : str ): return 1_0_0 @property def _lowercase ( self : Dict ): torch.manual_seed(0 ) __lowercase = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } __lowercase = UNetaDConditionModel(**UpperCAmelCase__ ) return model @property def _lowercase ( self : Dict ): return { "block_out_channels": [3_2, 6_4], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def _lowercase ( self : int ): torch.manual_seed(0 ) __lowercase = VQModel(**self.dummy_movq_kwargs ) return model def _lowercase ( self : int ): __lowercase = self.dummy_unet __lowercase = self.dummy_movq __lowercase = { "num_train_timesteps": 1_0_0_0, "beta_schedule": "linear", "beta_start": 0.00_085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } __lowercase = DDIMScheduler(**UpperCAmelCase__ ) __lowercase = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def _lowercase ( self : Any, UpperCAmelCase__ : Any, UpperCAmelCase__ : Any=0 ): __lowercase = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) __lowercase = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to( UpperCAmelCase__ ) # create init_image __lowercase = 
floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(UpperCAmelCase__ ) ).to(UpperCAmelCase__ ) __lowercase = image.cpu().permute(0, 2, 3, 1 )[0] __lowercase = Image.fromarray(np.uinta(UpperCAmelCase__ ) ).convert("RGB" ).resize((2_5_6, 2_5_6) ) if str(UpperCAmelCase__ ).startswith("mps" ): __lowercase = torch.manual_seed(UpperCAmelCase__ ) else: __lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ ) __lowercase = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 6_4, "width": 6_4, "num_inference_steps": 1_0, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def _lowercase ( self : List[Any] ): __lowercase = "cpu" __lowercase = self.get_dummy_components() __lowercase = self.pipeline_class(**UpperCAmelCase__ ) __lowercase = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = pipe(**self.get_dummy_inputs(UpperCAmelCase__ ) ) __lowercase = output.images __lowercase = pipe( **self.get_dummy_inputs(UpperCAmelCase__ ), return_dict=UpperCAmelCase__, )[0] __lowercase = image[0, -3:, -3:, -1] __lowercase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) __lowercase = np.array( [0.6_199_778, 0.63_984_406, 0.46_145_785, 0.62_944_984, 0.5_622_215, 0.47_306_132, 0.47_441_456, 0.4_607_606, 0.48_719_263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Tuple ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : str ): __lowercase = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) __lowercase = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) __lowercase = "A red cartoon frog, 4k" __lowercase = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.floataa ) pipe_prior.to(UpperCAmelCase__ ) __lowercase = KandinskyVaaImgaImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.floataa ) __lowercase = pipeline.to(UpperCAmelCase__ ) pipeline.set_progress_bar_config(disable=UpperCAmelCase__ ) __lowercase = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase ,__lowercase = pipe_prior( UpperCAmelCase__, generator=UpperCAmelCase__, num_inference_steps=5, negative_prompt="", ).to_tuple() __lowercase = pipeline( image=UpperCAmelCase__, image_embeds=UpperCAmelCase__, negative_image_embeds=UpperCAmelCase__, generator=UpperCAmelCase__, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, strength=0.2, output_type="np", ) __lowercase = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(UpperCAmelCase__, UpperCAmelCase__ )
144
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _a = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING __UpperCAmelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: __UpperCAmelCase : Optional[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: __UpperCAmelCase : Tuple = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def _lowercase ( self : Optional[int] ): __lowercase = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) __lowercase = text_classifier("This is great !" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "LABEL_0", "score": 0.504}] ) __lowercase = text_classifier("This is great !", top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) __lowercase = text_classifier(["This is great !", "This is bad"], top_k=2 ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) __lowercase = text_classifier("This is great !", top_k=1 ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "LABEL_0", "score": 0.504}] ) # Legacy behavior __lowercase = text_classifier("This is great !", return_all_scores=UpperCAmelCase__ ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "LABEL_0", "score": 0.504}] ) __lowercase = text_classifier("This is great !", return_all_scores=UpperCAmelCase__ ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) __lowercase = text_classifier(["This is great !", "Something else"], return_all_scores=UpperCAmelCase__ ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) __lowercase = text_classifier(["This is great !", "Something else"], return_all_scores=UpperCAmelCase__ ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ], ) @require_torch def _lowercase ( self : Dict ): import torch __lowercase = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch.device("cpu" ), ) __lowercase = text_classifier("This is great !" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "LABEL_0", "score": 0.504}] ) @require_tf def _lowercase ( self : Union[str, Any] ): __lowercase = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) __lowercase = text_classifier("This is great !" 
) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "LABEL_0", "score": 0.504}] ) @slow @require_torch def _lowercase ( self : Dict ): __lowercase = pipeline("text-classification" ) __lowercase = text_classifier("This is great !" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "POSITIVE", "score": 1.0}] ) __lowercase = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "NEGATIVE", "score": 1.0}] ) __lowercase = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "POSITIVE", "score": 0.988}] ) @slow @require_tf def _lowercase ( self : Tuple ): __lowercase = pipeline("text-classification", framework="tf" ) __lowercase = text_classifier("This is great !" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "POSITIVE", "score": 1.0}] ) __lowercase = text_classifier("This is bad !" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "NEGATIVE", "score": 1.0}] ) __lowercase = text_classifier("Birds are a type of animal" ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": "POSITIVE", "score": 0.988}] ) def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : int, UpperCAmelCase__ : Tuple ): __lowercase = TextClassificationPipeline(model=UpperCAmelCase__, tokenizer=UpperCAmelCase__ ) return text_classifier, ["HuggingFace is in", "This is another test"] def _lowercase ( self : Any, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : str ): __lowercase = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __lowercase = "HuggingFace is in" __lowercase = text_classifier(UpperCAmelCase__ ) self.assertEqual(nested_simplify(UpperCAmelCase__ ), [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) __lowercase = ["HuggingFace is in ", "Paris is in France"] __lowercase = text_classifier(UpperCAmelCase__ ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}, {"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}], ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __lowercase = text_classifier(UpperCAmelCase__, top_k=UpperCAmelCase__ ) __lowercase = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [[{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] * N, [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}] * N], ) __lowercase = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} __lowercase = text_classifier(UpperCAmelCase__ ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), {"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}, ) self.assertTrue(outputs["label"] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
__lowercase = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(UpperCAmelCase__ ): text_classifier(UpperCAmelCase__ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __lowercase = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] ) self.assertEqual( nested_simplify(UpperCAmelCase__ ), [{"label": ANY(UpperCAmelCase__ ), "score": ANY(UpperCAmelCase__ )}], ) self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
144
1
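# Illustrative usage sketch for the pipeline under test in the record above: running the same
# tiny test checkpoint named in that record through the text-classification pipeline with
# top_k=2. The scores shown are the ones that record's assertions report for this checkpoint;
# exact values may differ slightly across library versions.
from transformers import pipeline

text_classifier = pipeline(
    task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
print(text_classifier("This is great !", top_k=2))
# e.g. [{'label': 'LABEL_0', 'score': 0.504...}, {'label': 'LABEL_1', 'score': 0.496...}]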