Dataset schema:

    code                     string   length 81-54k
    code_codestyle           int64    0-721
    style_context            string   length 91-41.9k
    style_context_codestyle  int64    0-699
    label                    int64    0-1
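The rows below are serialized in this column order: code, code_codestyle, style_context, style_context_codestyle, label. As a minimal sketch, a dataset with this schema could be inspected with the `datasets` library; the repository id below is a placeholder, and the comment on `label` is a guess inferred from the values in this dump, not a documented meaning:

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the real dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # obfuscated source snippet (81-54k chars)
print(row["code_codestyle"])           # style id in [0, 721]
print(row["style_context"][:200])      # second snippet used as style context
print(row["style_context_codestyle"])  # style id in [0, 699]
print(row["label"])                    # 0/1 -- presumably whether the two style ids match
```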
"""simple docstring""" import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = KandinskyVaaPriorPipeline __UpperCamelCase = ['''prompt'''] __UpperCamelCase = ['''prompt''', '''negative_prompt'''] __UpperCamelCase = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] __UpperCamelCase = False @property def lowerCamelCase__ ( self : List[Any] ): return 3_2 @property def lowerCamelCase__ ( self : str ): return 3_2 @property def lowerCamelCase__ ( self : Tuple ): return self.time_input_dim @property def lowerCamelCase__ ( self : Any ): return self.time_input_dim * 4 @property def lowerCamelCase__ ( self : int ): return 1_0_0 @property def lowerCamelCase__ ( self : int ): lowerCAmelCase : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def lowerCamelCase__ ( self : Optional[int] ): torch.manual_seed(0 ) lowerCAmelCase : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(UpperCamelCase_ ) @property def lowerCamelCase__ ( self : List[Any] ): torch.manual_seed(0 ) lowerCAmelCase : Any = { '''num_attention_heads''': 2, '''attention_head_dim''': 1_2, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } lowerCAmelCase : List[Any] = PriorTransformer(**UpperCamelCase_ ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 lowerCAmelCase : int = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def lowerCamelCase__ ( self : Union[str, Any] ): torch.manual_seed(0 ) lowerCAmelCase : int = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , ) lowerCAmelCase : Union[str, Any] = CLIPVisionModelWithProjection(UpperCamelCase_ ) return model @property def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Dict = CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=UpperCamelCase_ , do_normalize=UpperCamelCase_ , do_resize=UpperCamelCase_ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_2_4 , ) return image_processor def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[int] = self.dummy_prior lowerCAmelCase : List[str] = self.dummy_image_encoder lowerCAmelCase : Tuple = self.dummy_text_encoder lowerCAmelCase : List[Any] = self.dummy_tokenizer lowerCAmelCase : List[str] = self.dummy_image_processor lowerCAmelCase : 
Optional[int] = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , num_train_timesteps=1_0_0_0 , clip_sample=UpperCamelCase_ , clip_sample_range=10.0 , ) lowerCAmelCase : int = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple=0 ): if str(UpperCamelCase_ ).startswith('''mps''' ): lowerCAmelCase : Dict = torch.manual_seed(UpperCamelCase_ ) else: lowerCAmelCase : List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowerCAmelCase : str = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[Any] = '''cpu''' lowerCAmelCase : List[str] = self.get_dummy_components() lowerCAmelCase : List[Any] = self.pipeline_class(**UpperCamelCase_ ) lowerCAmelCase : List[str] = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Any = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) ) lowerCAmelCase : Tuple = output.image_embeds lowerCAmelCase : Optional[int] = pipe( **self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0] lowerCAmelCase : Dict = image[0, -1_0:] lowerCAmelCase : str = image_from_tuple[0, -1_0:] assert image.shape == (1, 3_2) lowerCAmelCase : List[str] = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def lowerCamelCase__ ( self : int ): lowerCAmelCase : Optional[Any] = torch_device == '''cpu''' lowerCAmelCase : Tuple = True lowerCAmelCase : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , test_mean_pixel_difference=UpperCamelCase_ , ) @skip_mps def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[Any] = torch_device == '''cpu''' lowerCAmelCase : Tuple = False self._test_attention_slicing_forward_pass( test_max_difference=UpperCamelCase_ , test_mean_pixel_difference=UpperCamelCase_ , )
code_codestyle: 637
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example snake_case__ : int = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def _snake_case ( _snake_case : list[list[int]] ): lowerCAmelCase : Union[str, Any] = [] for i in range(len(_snake_case ) ): lowerCAmelCase : Any = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_snake_case ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_snake_case ) - 1: neighbour_count += cells[i + 1][j] if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCAmelCase : str = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_snake_case ) return next_generation def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ): lowerCAmelCase : int = [] for _ in range(_snake_case ): # Create output image lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) ) lowerCAmelCase : Union[str, Any] = img.load() # Save cells to image for x in range(len(_snake_case ) ): for y in range(len(cells[0] ) ): lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255 lowerCAmelCase : List[Any] = (colour, colour, colour) # Save image images.append(_snake_case ) lowerCAmelCase : Union[str, Any] = new_generation(_snake_case ) return images if __name__ == "__main__": snake_case__ : Union[str, Any] = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
style_context_codestyle: 637
label: 1
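For readability, here is a de-obfuscated sketch of the Game of Life step from the `style_context` above; the names are mine, and a neighbour-offset loop replaces the row's eight explicit `if` checks:

```python
def next_generation(cells: list[list[int]]) -> list[list[int]]:
    """One Game of Life step over a grid of 0 (dead) / 1 (alive) cells."""
    rows, cols = len(cells), len(cells[0])
    result = []
    for i in range(rows):
        new_row = []
        for j in range(cols):
            # Count the up-to-eight in-bounds neighbours.
            neighbours = sum(
                cells[i + di][j + dj]
                for di in (-1, 0, 1)
                for dj in (-1, 0, 1)
                if (di, dj) != (0, 0) and 0 <= i + di < rows and 0 <= j + dj < cols
            )
            alive = cells[i][j] == 1
            # Live cells survive with 2-3 neighbours; dead cells with exactly 3 are born.
            new_row.append(int((alive and 2 <= neighbours <= 3) or (not alive and neighbours == 3)))
        result.append(new_row)
    return result

print(next_generation([[0, 1, 0], [0, 1, 0], [0, 1, 0]]))  # vertical blinker becomes horizontal
```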
"""simple docstring""" import argparse import torch from transformers import ( EncodecConfig, EncodecFeatureExtractor, EncodecModel, logging, ) # checkpoints downloaded from: # https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th # https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin # https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th logging.set_verbosity_info() snake_case__ : str = logging.get_logger('''transformers.models.encodec''') snake_case__ : Dict = { '''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''', '''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''', '''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''', '''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''', } snake_case__ : List[str] = { '''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''', '''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''', '''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''', '''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''', '''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''', '''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''', '''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''', '''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''', '''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''', '''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''', '''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''', '''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''', '''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''', '''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''', '''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''', '''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''', '''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''', '''encoder.model.13.lstm''': '''encoder.layers.13.lstm''', '''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''', } snake_case__ : Optional[int] = { '''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''', '''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''', '''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''', '''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''', '''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''', '''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''', '''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''', '''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''', '''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''', '''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''', '''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''', '''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''', '''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''', '''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''', '''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''', '''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''', 
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''', '''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''', } snake_case__ : Optional[int] = { '''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''', '''decoder.model.1.lstm''': '''decoder.layers.1.lstm''', '''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''', '''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''', '''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''', '''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''', '''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''', '''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''', '''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''', '''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''', '''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''', '''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''', '''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''', '''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''', '''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''', '''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''', '''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''', '''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''', '''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''', } snake_case__ : List[str] = { '''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''', '''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''', '''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''', '''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''', '''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''', '''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''', '''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''', '''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''', '''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''', '''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''', '''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''', '''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''', '''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''', '''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''', '''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''', '''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''', '''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''', '''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''', } snake_case__ : str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_DECODER, } snake_case__ : str = { **MAPPING_QUANTIZER, **MAPPING_ENCODER, **MAPPING_ENCODER_48K, **MAPPING_DECODER, **MAPPING_DECODER_48K, } snake_case__ : Any = [] snake_case__ : int = [] def _snake_case ( _snake_case : str , _snake_case : str , _snake_case : List[Any] , _snake_case : str , _snake_case : Optional[Any] ): for attribute in key.split('''.''' ): lowerCAmelCase : List[Any] = getattr(_snake_case , _snake_case ) if weight_type is not None: lowerCAmelCase : List[Any] = getattr(_snake_case , _snake_case ).shape else: 
lowerCAmelCase : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowerCAmelCase : List[str] = value elif weight_type == "weight_g": lowerCAmelCase : Optional[int] = value elif weight_type == "weight_v": lowerCAmelCase : Union[str, Any] = value elif weight_type == "bias": lowerCAmelCase : int = value elif weight_type == "running_mean": lowerCAmelCase : Optional[Any] = value elif weight_type == "running_var": lowerCAmelCase : Any = value elif weight_type == "num_batches_tracked": lowerCAmelCase : Tuple = value elif weight_type == "weight_ih_l0": lowerCAmelCase : str = value elif weight_type == "weight_hh_l0": lowerCAmelCase : Optional[int] = value elif weight_type == "bias_ih_l0": lowerCAmelCase : Dict = value elif weight_type == "bias_hh_l0": lowerCAmelCase : Dict = value elif weight_type == "weight_ih_l1": lowerCAmelCase : Optional[Any] = value elif weight_type == "weight_hh_l1": lowerCAmelCase : Tuple = value elif weight_type == "bias_ih_l1": lowerCAmelCase : Dict = value elif weight_type == "bias_hh_l1": lowerCAmelCase : Optional[Any] = value else: lowerCAmelCase : Tuple = value logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def _snake_case ( _snake_case : int , _snake_case : str ): for key in ignore_keys: if key.endswith('''.*''' ): if name.startswith(key[:-1] ): return True elif ".*." in key: lowerCAmelCase, lowerCAmelCase : List[Any] = key.split('''.*.''' ) if prefix in name and suffix in name: return True elif key in name: return True return False def _snake_case ( _snake_case : int , _snake_case : Any , _snake_case : Tuple ): lowerCAmelCase : Any = [] if model_name == "encodec_24khz" or "encodec_32khz": lowerCAmelCase : Optional[int] = MAPPING_24K elif model_name == "encodec_48khz": lowerCAmelCase : Optional[Any] = MAPPING_48K else: raise ValueError(f'''Unsupported model: {model_name}''' ) for name, value in orig_dict.items(): if should_ignore(_snake_case , _snake_case ): logger.info(f'''{name} was ignored''' ) continue lowerCAmelCase : Union[str, Any] = False for key, mapped_key in MAPPING.items(): if "*" in key: lowerCAmelCase, lowerCAmelCase : List[str] = key.split('''.*.''' ) if prefix in name and suffix in name: lowerCAmelCase : int = suffix if key in name: # HACK otherwise .embed gets initialized with .embed_avg too if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ): continue lowerCAmelCase : List[Any] = True if "*" in mapped_key: lowerCAmelCase : Union[str, Any] = name.split(_snake_case )[0].split('''.''' )[-2] lowerCAmelCase : Optional[int] = mapped_key.replace('''*''' , _snake_case ) if "weight_g" in name: lowerCAmelCase : List[str] = '''weight_g''' elif "weight_v" in name: lowerCAmelCase : Optional[int] = '''weight_v''' elif "weight_ih_l0" in name: lowerCAmelCase : List[Any] = '''weight_ih_l0''' elif "weight_hh_l0" in name: lowerCAmelCase : Union[str, Any] = '''weight_hh_l0''' elif "bias_ih_l0" in name: lowerCAmelCase : Optional[Any] = '''bias_ih_l0''' elif "bias_hh_l0" in name: lowerCAmelCase : int = '''bias_hh_l0''' elif "weight_ih_l1" in name: lowerCAmelCase : Tuple = '''weight_ih_l1''' elif "weight_hh_l1" in name: lowerCAmelCase : Any = '''weight_hh_l1''' elif "bias_ih_l1" in name: lowerCAmelCase : List[str] = '''bias_ih_l1''' elif "bias_hh_l1" in name: lowerCAmelCase : Union[str, 
Any] = '''bias_hh_l1''' elif "bias" in name: lowerCAmelCase : Optional[Any] = '''bias''' elif "weight" in name: lowerCAmelCase : List[str] = '''weight''' elif "running_mean" in name: lowerCAmelCase : str = '''running_mean''' elif "running_var" in name: lowerCAmelCase : Union[str, Any] = '''running_var''' elif "num_batches_tracked" in name: lowerCAmelCase : Optional[Any] = '''num_batches_tracked''' else: lowerCAmelCase : Tuple = None set_recursively(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) continue if not is_used: unused_weights.append(_snake_case ) logger.warning(f'''Unused weights: {unused_weights}''' ) @torch.no_grad() def _snake_case ( _snake_case : Any , _snake_case : int , _snake_case : List[Any] , _snake_case : Optional[Any]=None , _snake_case : Optional[Any]=None , ): if config_path is not None: lowerCAmelCase : Optional[int] = EncodecConfig.from_pretrained(_snake_case ) else: lowerCAmelCase : int = EncodecConfig() if model_name == "encodec_24khz": pass # config is already correct elif model_name == "encodec_32khz": lowerCAmelCase : str = [8, 5, 4, 4] lowerCAmelCase : Optional[int] = [2.2] lowerCAmelCase : int = 64 lowerCAmelCase : List[str] = 32000 lowerCAmelCase : str = 2048 lowerCAmelCase : str = False lowerCAmelCase : List[str] = False lowerCAmelCase : str = False elif model_name == "encodec_48khz": lowerCAmelCase : Dict = [8, 5, 4, 2] lowerCAmelCase : Any = [3.0, 6.0, 12.0, 24.0] lowerCAmelCase : Tuple = 48000 lowerCAmelCase : Optional[Any] = 2 lowerCAmelCase : int = False lowerCAmelCase : Union[str, Any] = '''time_group_norm''' lowerCAmelCase : List[Any] = True lowerCAmelCase : str = 1.0 lowerCAmelCase : Optional[int] = 0.01 else: raise ValueError(f'''Unknown model name: {model_name}''' ) lowerCAmelCase : Dict = EncodecModel(_snake_case ) lowerCAmelCase : Any = EncodecFeatureExtractor( feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , ) feature_extractor.save_pretrained(_snake_case ) lowerCAmelCase : List[Any] = torch.load(_snake_case ) if "best_state" in original_checkpoint: # we might have a training state saved, in which case discard the yaml results and just retain the weights lowerCAmelCase : Dict = original_checkpoint['''best_state'''] recursively_load_weights(_snake_case , _snake_case , _snake_case ) model.save_pretrained(_snake_case ) if repo_id: print('''Pushing to the hub...''' ) feature_extractor.push_to_hub(_snake_case ) model.push_to_hub(_snake_case ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '''--model''', default='''encodec_24khz''', type=str, help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''', ) parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) snake_case__ : Union[str, Any] = parser.parse_args() convert_checkpoint( args.model, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
code_codestyle: 637
"""simple docstring""" from __future__ import annotations class snake_case_: def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self : Dict ): # searches pattern in text and returns index positions lowerCAmelCase : Union[str, Any] = [] for i in range(self.textLen - self.patLen + 1 ): lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ ) if mismatch_index == -1: positions.append(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] ) lowerCAmelCase : int = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions snake_case__ : str = '''ABAABA''' snake_case__ : List[str] = '''AB''' snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern) snake_case__ : Optional[Any] = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
style_context_codestyle: 637
label: 1
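Note that the search loop in the `style_context` above computes a bad-character shift and then discards it, advancing by one position per iteration, so it degenerates to a quadratic right-to-left scan. A minimal sketch of what it actually computes (the names are mine):

```python
def find_all(text: str, pattern: str) -> list[int]:
    """All start indices of `pattern` in `text`, comparing right-to-left."""
    n, m = len(text), len(pattern)
    return [
        start
        for start in range(n - m + 1)
        if all(pattern[i] == text[start + i] for i in range(m - 1, -1, -1))
    ]

print(find_all("ABAABA", "AB"))  # [0, 3]
```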
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=a__ ) class snake_case_( a__ ): __UpperCamelCase = field(default='''automatic-speech-recognition''' , metadata={'''include_in_asdict_even_if_is_default''': True} ) __UpperCamelCase = Features({'''audio''': Audio()} ) __UpperCamelCase = Features({'''transcription''': Value('''string''' )} ) __UpperCamelCase = "audio" __UpperCamelCase = "transcription" def lowerCamelCase__ ( self : str , UpperCamelCase_ : str ): if self.audio_column not in features: raise ValueError(F'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] , UpperCamelCase_ ): raise ValueError(F'''Column {self.audio_column} is not an Audio type.''' ) lowerCAmelCase : Tuple = copy.deepcopy(self ) lowerCAmelCase : Any = self.input_schema.copy() lowerCAmelCase : str = features[self.audio_column] lowerCAmelCase : str = input_schema return task_template @property def lowerCamelCase__ ( self : int ): return {self.audio_column: "audio", self.transcription_column: "transcription"}
code_codestyle: 637
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case_( a__ ): pass class snake_case_: def __init__( self : Any , UpperCamelCase_ : Any ): lowerCAmelCase : Any = data lowerCAmelCase : Node | None = None def __iter__( self : int ): lowerCAmelCase : Any = self lowerCAmelCase : Union[str, Any] = [] while node: if node in visited: raise ContainsLoopError visited.append(UpperCamelCase_ ) yield node.data lowerCAmelCase : Optional[int] = node.next_node @property def lowerCamelCase__ ( self : str ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": snake_case__ : Dict = Node(1) snake_case__ : Any = Node(2) snake_case__ : int = Node(3) snake_case__ : Any = Node(4) print(root_node.has_loop) # False snake_case__ : Tuple = root_node.next_node print(root_node.has_loop) # True snake_case__ : List[Any] = Node(5) snake_case__ : int = Node(6) snake_case__ : List[Any] = Node(5) snake_case__ : Dict = Node(6) print(root_node.has_loop) # False snake_case__ : Any = Node(1) print(root_node.has_loop) # False
style_context_codestyle: 637
label: 1
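The `has_loop` property above detects a cycle by keeping a `visited` list, which costs O(n) extra memory and O(n^2) comparisons. A constant-memory alternative over the same `next_node` links is Floyd's tortoise-and-hare; this sketch is an alternative, not the row's code:

```python
def has_loop(head) -> bool:
    """Floyd's cycle detection: a fast pointer laps a slow one iff the list loops."""
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one node per step
        fast = fast.next_node.next_node  # advances two nodes per step
        if slow is fast:
            return True
    return False
```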
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class snake_case_: def __init__( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=1_0_0 , UpperCamelCase_ : List[str]=1_3 , UpperCamelCase_ : Optional[int]=3_0 , UpperCamelCase_ : List[str]=2 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : str=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[Any]=3_2 , UpperCamelCase_ : Union[str, Any]=4 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : int=1_0 , UpperCamelCase_ : Tuple=0.02 , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Dict=[0, 1, 2, 3] , ): lowerCAmelCase : Dict = parent lowerCAmelCase : List[Any] = 1_0_0 lowerCAmelCase : List[str] = batch_size lowerCAmelCase : Union[str, Any] = image_size lowerCAmelCase : Dict = patch_size lowerCAmelCase : Any = num_channels lowerCAmelCase : List[Any] = is_training lowerCAmelCase : Union[str, Any] = use_labels lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : Dict = num_hidden_layers lowerCAmelCase : Union[str, Any] = num_attention_heads lowerCAmelCase : Optional[Any] = intermediate_size lowerCAmelCase : Dict = hidden_act lowerCAmelCase : Tuple = hidden_dropout_prob lowerCAmelCase : List[Any] = attention_probs_dropout_prob lowerCAmelCase : Union[str, Any] = type_sequence_label_size lowerCAmelCase : Any = initializer_range lowerCAmelCase : Optional[Any] = scope lowerCAmelCase : Tuple = out_indices lowerCAmelCase : List[str] = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2 lowerCAmelCase : List[Any] = num_patches + 1 def lowerCamelCase__ ( self : str ): lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase : List[Any] = None lowerCAmelCase : Optional[int] = None if self.use_labels: lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase__ ( self : Any ): return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str ): lowerCAmelCase : Union[str, Any] = BeitModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = BeitForMaskedImageModeling(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ): lowerCAmelCase : Union[str, Any] = self.type_sequence_label_size lowerCAmelCase : Union[str, Any] = BeitForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase : List[str] = 1 lowerCAmelCase : str = BeitForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int ): lowerCAmelCase : List[str] = self.num_labels lowerCAmelCase : int = BeitForSemanticSegmentation(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : int = model(UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) lowerCAmelCase : Tuple = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = config_and_inputs lowerCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = ( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) __UpperCamelCase = ( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) 
__UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Any ): lowerCAmelCase : str = BeitModelTester(self ) lowerCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : Optional[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason='''BEiT does not use inputs_embeds''' ) def lowerCamelCase__ ( self : str ): pass @require_torch_multi_gpu @unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' ) def lowerCamelCase__ ( self : Optional[int] ): pass def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Any = model_class(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : List[Any] = [*signature.parameters.keys()] lowerCAmelCase : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): if not self.model_tester.is_training: return lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(UpperCamelCase_ ), BeitForMaskedImageModeling]: continue lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.train() lowerCAmelCase : str = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) lowerCAmelCase : List[Any] = model(**UpperCamelCase_ ).loss loss.backward() def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return lowerCAmelCase : Any = False lowerCAmelCase : Dict = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(UpperCamelCase_ ), BeitForMaskedImageModeling] or not 
model_class.supports_gradient_checkpointing ): continue lowerCAmelCase : str = model_class(UpperCamelCase_ ) model.gradient_checkpointing_enable() model.to(UpperCamelCase_ ) model.train() lowerCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model(**UpperCamelCase_ ).loss loss.backward() def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = _config_zero_init(UpperCamelCase_ ) for model_class in self.all_model_classes: lowerCAmelCase : str = model_class(config=UpperCamelCase_ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @slow def lowerCamelCase__ ( self : Any ): for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Tuple = BeitModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def _snake_case ( ): lowerCAmelCase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class snake_case_( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self : Dict ): return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Union[str, Any] = BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = self.default_image_processor lowerCAmelCase : Union[str, Any] = prepare_img() lowerCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).pixel_values.to(UpperCamelCase_ ) # prepare bool_masked_pos lowerCAmelCase : List[Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : List[Any] = model(pixel_values=UpperCamelCase_ , bool_masked_pos=UpperCamelCase_ ) lowerCAmelCase : List[str] = outputs.logits # verify the logits lowerCAmelCase : List[Any] = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = torch.tensor( [[-3.2_437, 0.5_072, -13.9_174], [-3.2_456, 0.4_948, -13.9_401], [-3.2_033, 0.5_121, -13.8_550]] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase_ , atol=1E-2 ) ) @slow def lowerCamelCase__ ( self : int ): lowerCAmelCase : Dict = BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = self.default_image_processor lowerCAmelCase : Union[str, Any] = prepare_img() lowerCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : int = model(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = outputs.logits # verify the logits lowerCAmelCase : List[str] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = torch.tensor([-1.2_385, -1.0_987, -1.0_108] ).to(UpperCamelCase_ ) 
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) ) lowerCAmelCase : Optional[int] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to( UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = self.default_image_processor lowerCAmelCase : Optional[int] = prepare_img() lowerCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ ) lowerCAmelCase : Tuple = outputs.logits # verify the logits lowerCAmelCase : Tuple = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = torch.tensor([1.6_881, -0.2_787, 0.5_901] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) ) lowerCAmelCase : Optional[int] = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Optional[Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) lowerCAmelCase : str = model.to(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = BeitImageProcessor(do_resize=UpperCamelCase_ , size=6_4_0 , do_center_crop=UpperCamelCase_ ) lowerCAmelCase : Any = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) lowerCAmelCase : int = Image.open(ds[0]['''file'''] ) lowerCAmelCase : Tuple = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : List[Any] = model(**UpperCamelCase_ ) lowerCAmelCase : Any = outputs.logits # verify the logits lowerCAmelCase : List[str] = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , UpperCamelCase_ ) lowerCAmelCase : List[str] = version.parse(PIL.__version__ ) < version.parse('''9.0.0''' ) if is_pillow_less_than_a: lowerCAmelCase : Any = torch.tensor( [ [[-4.9_225, -2.3_954, -3.0_522], [-2.8_822, -1.0_046, -1.7_561], [-2.9_549, -1.3_228, -2.1_347]], [[-5.8_168, -3.4_129, -4.0_778], [-3.8_651, -2.2_214, -3.0_277], [-3.8_356, -2.4_643, -3.3_535]], [[-0.0_078, 3.9_952, 4.0_754], [2.9_856, 4.6_944, 5.0_035], [3.2_413, 4.7_813, 4.9_969]], ] , device=UpperCamelCase_ , ) else: lowerCAmelCase : Tuple = torch.tensor( [ [[-4.8_960, -2.3_688, -3.0_355], [-2.8_478, -0.9_836, -1.7_418], [-2.9_449, -1.3_332, -2.1_456]], [[-5.8_081, -3.4_124, -4.1_006], [-3.8_561, -2.2_081, -3.0_323], [-3.8_365, -2.4_601, -3.3_669]], [[-0.0_309, 3.9_868, 4.0_540], [2.9_640, 4.6_877, 4.9_976], [3.2_081, 4.7_690, 4.9_942]], ] , device=UpperCamelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' ) lowerCAmelCase : Optional[int] = model.to(UpperCamelCase_ ) lowerCAmelCase : Dict = BeitImageProcessor(do_resize=UpperCamelCase_ , size=6_4_0 , do_center_crop=UpperCamelCase_ ) lowerCAmelCase : Dict = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' ) lowerCAmelCase : str = Image.open(ds[0]['''file'''] ) lowerCAmelCase : List[str] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' 
).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : Optional[Any] = model(**UpperCamelCase_ ) lowerCAmelCase : List[str] = outputs.logits.detach().cpu() lowerCAmelCase : str = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(5_0_0, 3_0_0)] ) lowerCAmelCase : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ ) lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ ) lowerCAmelCase : str = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
code_codestyle: 637
"""simple docstring""" from torch import nn class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ): super().__init__() lowerCAmelCase : str = class_size lowerCAmelCase : Dict = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowerCAmelCase : int = self.mlp(UpperCamelCase_ ) return logits
style_context_codestyle: 637
label: 1
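De-obfuscated, the `style_context` above is a single-linear-layer classification head. A sketch with readable names (mine), keeping the commented-out two-layer MLP variant it replaced:

```python
from torch import nn


class ClassificationHead(nn.Module):
    """Maps an embedding of size `embed_size` to `class_size` logits."""

    def __init__(self, class_size: int, embed_size: int) -> None:
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # A deeper variant would be: Linear(embed, embed) -> ReLU -> Linear(embed, classes)
        self.mlp = nn.Linear(embed_size, class_size)  # single projection

    def forward(self, hidden_state):
        return self.mlp(hidden_state)
```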
"""simple docstring""" import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class snake_case_: def __init__( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict=2 , UpperCamelCase_ : List[Any]=3_2 , UpperCamelCase_ : Any=1_6 , UpperCamelCase_ : Optional[Any]=3 , UpperCamelCase_ : int=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=3_2 , UpperCamelCase_ : Union[str, Any]=4 , UpperCamelCase_ : Optional[Any]=[0, 1, 2, 3] , UpperCamelCase_ : Optional[int]=4 , UpperCamelCase_ : Dict=3_7 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Dict=[1, 3_8_4, 2_4, 2_4] , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=None , ): lowerCAmelCase : List[Any] = parent lowerCAmelCase : int = batch_size lowerCAmelCase : Optional[int] = image_size lowerCAmelCase : Tuple = patch_size lowerCAmelCase : Optional[Any] = num_channels lowerCAmelCase : Union[str, Any] = is_training lowerCAmelCase : str = use_labels lowerCAmelCase : Any = hidden_size lowerCAmelCase : int = num_hidden_layers lowerCAmelCase : List[Any] = backbone_out_indices lowerCAmelCase : Union[str, Any] = num_attention_heads lowerCAmelCase : Dict = intermediate_size lowerCAmelCase : Optional[Any] = hidden_act lowerCAmelCase : Tuple = hidden_dropout_prob lowerCAmelCase : Dict = attention_probs_dropout_prob lowerCAmelCase : List[Any] = initializer_range lowerCAmelCase : Any = num_labels lowerCAmelCase : Any = backbone_featmap_shape lowerCAmelCase : Tuple = scope lowerCAmelCase : Tuple = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase : str = (image_size // patch_size) ** 2 lowerCAmelCase : Optional[Any] = num_patches + 1 def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase : Any = None if self.use_labels: lowerCAmelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase : int = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Tuple = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [9_6, 1_9_2, 3_8_4, 7_6_8], '''num_groups''': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=UpperCamelCase_ , backbone_featmap_shape=self.backbone_featmap_shape , ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ): lowerCAmelCase : int = DPTModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : int = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ): lowerCAmelCase : Optional[Any] = self.num_labels lowerCAmelCase : Dict = DPTForDepthEstimation(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model(UpperCamelCase_ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict ): lowerCAmelCase : Tuple = self.num_labels lowerCAmelCase : List[str] = DPTForSemanticSegmentation(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : int = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[Any] = config_and_inputs lowerCAmelCase : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () __UpperCamelCase = ( { '''depth-estimation''': DPTForDepthEstimation, '''feature-extraction''': DPTModel, '''image-segmentation''': DPTForSemanticSegmentation, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : int ): lowerCAmelCase : Any = DPTModelTester(self ) lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : List[str] ): self.config_tester.run_common_tests() @unittest.skip(reason='''DPT does not use inputs_embeds''' ) def lowerCamelCase__ ( self : Dict ): pass def lowerCamelCase__ ( self : Dict ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : List[str] = model_class(UpperCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase : Any = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) ) def lowerCamelCase__ ( self : int ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase : Union[str, Any] = model_class(UpperCamelCase_ ) lowerCAmelCase : 
Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase : Tuple = [*signature.parameters.keys()] lowerCAmelCase : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = True if model_class in get_values(UpperCamelCase_ ): continue lowerCAmelCase : Optional[int] = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.train() lowerCAmelCase : Optional[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ ).loss loss.backward() def lowerCamelCase__ ( self : List[str] ): for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Tuple = False lowerCAmelCase : Optional[int] = True if model_class in get_values(UpperCamelCase_ ) or not model_class.supports_gradient_checkpointing: continue lowerCAmelCase : int = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.gradient_checkpointing_enable() model.train() lowerCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(**UpperCamelCase_ ).loss loss.backward() def lowerCamelCase__ ( self : Dict ): lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = _config_zero_init(UpperCamelCase_ ) for model_class in self.all_model_classes: lowerCAmelCase : str = model_class(config=UpperCamelCase_ ) # Skip the check for the backbone lowerCAmelCase : List[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": lowerCAmelCase : str = [F'''{name}.{key}''' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowerCamelCase__ ( self : Optional[Any] ): pass @slow def lowerCamelCase__ ( self : List[str] ): for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: lowerCAmelCase : str = DPTModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): # We do this test only for DPTForDepthEstimation since it 
is the only model that uses readout_type lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : List[str] = '''add''' with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : Tuple = DPTForDepthEstimation(UpperCamelCase_ ) def _snake_case ( ): lowerCAmelCase : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision @slow class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' ) lowerCAmelCase : Dict = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = prepare_img() lowerCAmelCase : Dict = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): lowerCAmelCase : int = model(**UpperCamelCase_ ) lowerCAmelCase : Any = outputs.predicted_depth # verify the predicted depth lowerCAmelCase : Any = torch.Size((1, 3_8_4, 3_8_4) ) self.assertEqual(predicted_depth.shape , UpperCamelCase_ ) lowerCAmelCase : List[str] = torch.tensor( [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_0_0 , UpperCamelCase_ , atol=1E-4 ) )
"""simple docstring""" class snake_case_: def __init__( self : Union[str, Any] , UpperCamelCase_ : str ): lowerCAmelCase : Dict = val lowerCAmelCase : str = None lowerCAmelCase : Dict = None def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ): if self.val: if val < self.val: if self.left is None: lowerCAmelCase : int = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowerCAmelCase : Any = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = val def _snake_case ( _snake_case : Tuple , _snake_case : str ): # Recursive traversal if root: inorder(root.left , _snake_case ) res.append(root.val ) inorder(root.right , _snake_case ) def _snake_case ( _snake_case : Optional[Any] ): # Build BST if len(_snake_case ) == 0: return arr lowerCAmelCase : Optional[Any] = Node(arr[0] ) for i in range(1 , len(_snake_case ) ): root.insert(arr[i] ) # Traverse BST in order. lowerCAmelCase : Optional[int] = [] inorder(_snake_case , _snake_case ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : list[int] , _snake_case : int ): if len(_snake_case ) == 0: return False lowerCAmelCase : List[Any] = len(_snake_case ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , _snake_case ) else: return binary_search(a_list[midpoint + 1 :] , _snake_case ) if __name__ == "__main__": snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip() snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')] snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip()) snake_case__ : str = '''''' if binary_search(sequence, target) else '''not ''' print(f"""{target} was {not_str}found in {sequence}""")
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : int = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case_( a__ ): __UpperCamelCase = '''levit''' def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Tuple = image_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = kernel_size lowerCAmelCase : Dict = stride lowerCAmelCase : List[Any] = padding lowerCAmelCase : Dict = hidden_sizes lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Tuple = depths lowerCAmelCase : Dict = key_dim lowerCAmelCase : Union[str, Any] = drop_path_rate lowerCAmelCase : List[Any] = patch_size lowerCAmelCase : Tuple = attention_ratio lowerCAmelCase : Optional[int] = mlp_ratio lowerCAmelCase : Union[str, Any] = initializer_range lowerCAmelCase : List[str] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : Tuple ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase__ ( self : Optional[Any] ): return 1E-4
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case_( a__ ): __UpperCamelCase = ['''image_processor''', '''tokenizer'''] __UpperCamelCase = '''Pix2StructImageProcessor''' __UpperCamelCase = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple ): lowerCAmelCase : str = False super().__init__(UpperCamelCase_ , UpperCamelCase_ ) def __call__( self : Tuple , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = 2_0_4_8 , UpperCamelCase_ : int = 0 , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : str , ): if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None and not self.image_processor.is_vqa: lowerCAmelCase : str = self.tokenizer lowerCAmelCase : Optional[int] = self.tokenizer( text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , ) return text_encoding if not self.image_processor.is_vqa: # add pixel_values lowerCAmelCase : Any = self.image_processor( UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_patches=UpperCamelCase_ , **UpperCamelCase_ ) else: # add pixel_values and bbox lowerCAmelCase : Tuple = self.image_processor( UpperCamelCase_ , return_tensors=UpperCamelCase_ , max_patches=UpperCamelCase_ , header_text=UpperCamelCase_ , **UpperCamelCase_ ) if text is not None and not self.image_processor.is_vqa: lowerCAmelCase : Optional[int] = self.tokenizer( text=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , stride=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_overflowing_tokens=UpperCamelCase_ , return_special_tokens_mask=UpperCamelCase_ , return_offsets_mapping=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_length=UpperCamelCase_ , verbose=UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ , ) if "attention_mask" in text_encoding: lowerCAmelCase : Any = text_encoding.pop('''attention_mask''' ) if "input_ids" in text_encoding: lowerCAmelCase : Tuple = text_encoding.pop('''input_ids''' ) else: lowerCAmelCase : Optional[Any] = None if 
text_encoding is not None: encoding_image_processor.update(UpperCamelCase_ ) return encoding_image_processor def lowerCamelCase__ ( self : str , *UpperCamelCase_ : int , **UpperCamelCase_ : Optional[Any] ): return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple , *UpperCamelCase_ : int , **UpperCamelCase_ : Dict ): return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ ) @property def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = self.tokenizer.model_input_names lowerCAmelCase : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
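# Usage sketch (illustrative addition; Pix2StructProcessor and the checkpoint
# name are assumptions about the public API this class corresponds to):
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Returns flattened image patches plus the tokenized text as decoder inputs.
inputs = processor(images=image, text="A picture of", return_tensors="pt")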
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): lowerCAmelCase : str = 3 lowerCAmelCase : Tuple = 2_5_0 lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) lowerCAmelCase : Union[str, Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Dict = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : str ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": snake_case__ : int = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '''--original_config_file''', default=None, type=str, help='''The YAML config file corresponding to the original architecture.''', ) parser.add_argument( '''--num_in_channels''', default=None, type=int, help='''The number of input channels. If `None` number of input channels will be automatically inferred.''', ) parser.add_argument( '''--scheduler_type''', default='''pndm''', type=str, help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''', ) parser.add_argument( '''--pipeline_type''', default=None, type=str, help=( '''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'''' '''. If `None` pipeline will be automatically inferred.''' ), ) parser.add_argument( '''--image_size''', default=None, type=int, help=( '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2''' ''' Base. Use 768 for Stable Diffusion v2.''' ), ) parser.add_argument( '''--prediction_type''', default=None, type=str, help=( '''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable''' ''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.''' ), ) parser.add_argument( '''--extract_ema''', action='''store_true''', help=( '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights''' ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield''' ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.''' ), ) parser.add_argument( '''--upcast_attention''', action='''store_true''', help=( '''Whether the attention computation should always be upcasted. This is necessary when running stable''' ''' diffusion 2.1.''' ), ) parser.add_argument( '''--from_safetensors''', action='''store_true''', help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''', ) parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') parser.add_argument( '''--stable_unclip''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''', ) parser.add_argument( '''--stable_unclip_prior''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''', ) parser.add_argument( '''--clip_stats_path''', type=str, help='''Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''', required=False, ) parser.add_argument( '''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.''' ) parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--vae_path''', type=str, default=None, required=False, help='''Set to a path, hub id to an already converted vae to not convert it again.''', ) snake_case__ : List[str] = parser.parse_args() snake_case__ : Tuple = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
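# Example invocation (illustrative addition; the script filename is an
# assumption, while the flags are the ones declared by the argparse setup above):
#
#   python convert_original_stable_diffusion_to_diffusers.py \
#       --checkpoint_path ./v1-5-pruned.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type pndm \
#       --dump_path ./converted-pipeline \
#       --to_safetensors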
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = None def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case : List[Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowerCAmelCase : List[Any] = [] for i in range(_snake_case ): lowerCAmelCase : int = i / num_diffusion_timesteps lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) ) return torch.tensor(_snake_case , dtype=torch.floataa ) class snake_case_( a__ , a__ ): @register_to_config def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ ) lowerCAmelCase : str = 1.0 - self.betas lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) lowerCAmelCase : Tuple = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowerCAmelCase : Any = 1.0 # setable values lowerCAmelCase : Any = None lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() ) lowerCAmelCase : List[str] = variance_type def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ): return sample def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ): lowerCAmelCase : Any = num_inference_steps lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ): if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : int = self.alphas_cumprod[t] lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : Dict = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : Tuple = self.betas[t] else: lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add 
variance to pred_sample lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowerCAmelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) ) lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowerCAmelCase : Optional[Any] = variance.log() lowerCAmelCase : Union[str, Any] = beta.log() lowerCAmelCase : Dict = (predicted_variance + 1) / 2 lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 ) else: lowerCAmelCase : Optional[int] = None # 1. compute alphas, betas if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t] lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : int = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : List[Any] = self.betas[t] lowerCAmelCase : Optional[int] = self.alphas[t] else: lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev lowerCAmelCase : Dict = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowerCAmelCase : Tuple = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowerCAmelCase : Dict = torch.clamp( UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCAmelCase : int = 0 if t > 0: lowerCAmelCase : Union[str, Any] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device ) lowerCAmelCase : Any = self._get_variance( UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , ) if self.variance_type == "fixed_small_log": lowerCAmelCase : str = variance elif self.variance_type == "learned_range": lowerCAmelCase : Optional[Any] = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) lowerCAmelCase : List[Any] = variance * variance_noise lowerCAmelCase : int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowerCAmelCase : int = timesteps.to(original_samples.device ) lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase : str = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
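# Numerical sanity sketch (an addition; it assumes the class above matches the
# released diffusers UnCLIPScheduler, as its output dataclass suggests):
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler()
x0 = torch.randn(2, 4)
noise = torch.randn_like(x0)
t = torch.tensor([0, 0])
# add_noise computes sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * noise;
# at t = 0, alpha_bar is close to 1 for the squaredcos_cap_v2 schedule, so the
# noisy sample is nearly identical to the original.
noisy = scheduler.add_noise(x0, noise, t)
assert torch.allclose(noisy, x0, atol=0.1)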
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue_model_parallelism.py''', '''model_name_or_path''': '''roberta-large''', '''instance_type''': '''ml.p3dn.24xlarge''', '''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2}, }, { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''roberta-large''', '''instance_type''': '''ml.p3dn.24xlarge''', '''results''': {'''train_runtime''': 1_600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2}, }, ] ) class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Dict ): if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=UpperCamelCase_ , ) assert hasattr(self , '''env''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[Any] ): # configuration for running training on smdistributed Model Parallel lowerCAmelCase : Union[str, Any] = { '''enabled''': True, '''processes_per_host''': 8, } lowerCAmelCase : Tuple = { '''enabled''': True, '''parameters''': { '''microbatches''': 4, '''placement_strategy''': '''spread''', '''pipeline''': '''interleaved''', '''optimize''': '''speed''', '''partitions''': 4, '''ddp''': True, }, } lowerCAmelCase : Optional[int] = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options} lowerCAmelCase : List[str] = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer''' # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=UpperCamelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase_ , hyperparameters={ **self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path, '''max_steps''': 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase_ , py_version='''py36''' , ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): TrainingJobAnalytics(UpperCamelCase_ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(1,)] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[Any] ): # create estimator lowerCAmelCase : Optional[Any] = self.create_estimator(UpperCamelCase_ ) # run training estimator.fit() # result dataframe lowerCAmelCase : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis lowerCAmelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) lowerCAmelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, 
preprocessing, stopping lowerCAmelCase : Dict = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase_ )
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class snake_case_: def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ): lowerCAmelCase : Any = parent lowerCAmelCase : Any = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : str = is_training lowerCAmelCase : List[Any] = use_input_mask lowerCAmelCase : Optional[int] = use_token_type_ids lowerCAmelCase : Union[str, Any] = use_labels lowerCAmelCase : List[str] = vocab_size lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : int = num_hidden_layers lowerCAmelCase : Union[str, Any] = num_attention_heads lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : List[Any] = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : Tuple = attention_probs_dropout_prob lowerCAmelCase : Optional[Any] = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Tuple = type_sequence_label_size lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : str = num_labels lowerCAmelCase : Optional[int] = num_choices lowerCAmelCase : Tuple = scope def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : Tuple = None if self.use_input_mask: lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : List[str] = None if self.use_token_type_ids: lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : int = None lowerCAmelCase : int = None lowerCAmelCase : Tuple = None if self.use_labels: lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Tuple ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = True lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , ) lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ): lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : str = True lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # first forward pass lowerCAmelCase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 
) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] lowerCAmelCase : str = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] # select random slice lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Tuple = config_and_inputs lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , a__ , unittest.TestCase ): __UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = LlamaModelTester(self ) lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase : str = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : List[str] = 3 lowerCAmelCase : List[str] = input_dict['''input_ids'''] lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : 
int = '''single_label_classification''' lowerCAmelCase : Tuple = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : Dict = '''multi_label_classification''' lowerCAmelCase : Union[str, Any] = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def lowerCamelCase__ ( self : Optional[Any] ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size ) lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ ) original_model.to(UpperCamelCase_ ) original_model.eval() lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0} lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ ) scaled_model.to(UpperCamelCase_ ) scaled_model.eval() lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) @require_torch class snake_case_( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, 
-2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # fmt: off lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Model is curently gated''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' lowerCAmelCase : int = '''Simply put, the theory of relativity states that ''' lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ ) # greedy generation outputs lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
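# Greedy-decoding sketch (an addition; the gated checkpoint and generate()
# arguments follow the integration test above):
from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="sequential")
input_ids = tokenizer.encode("Simply put, the theory of relativity states that ", return_tensors="pt")
generated = model.generate(input_ids, max_new_tokens=64, do_sample=False)
print(tokenizer.decode(generated[0], skip_special_tokens=True))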
"""simple docstring""" snake_case__ : str = ''' # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git ''' snake_case__ : List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}] snake_case__ : Tuple = { '''{processor_class}''': '''FakeProcessorClass''', '''{model_class}''': '''FakeModelClass''', '''{object_class}''': '''FakeObjectClass''', }
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ): lowerCAmelCase : Dict = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ): lowerCAmelCase : Optional[int] = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _snake_case ) lowerCAmelCase : List[Any] = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Any = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , ) for _ in range(1_0_0_0 ): lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class snake_case_( unittest.TestCase ): __UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None __UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __UpperCamelCase = 10 def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase : Optional[Any] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' ) class snake_case_: def __init__( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = fn def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ): return self.fn(*UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
637
"""simple docstring""" import os import shutil import tempfile from unittest import TestCase from unittest.mock import patch import numpy as np from datasets import Dataset from transformers.models.realm.configuration_realm import RealmConfig from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer class snake_case_( a__ ): def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : List[Any] = tempfile.mkdtemp() lowerCAmelCase : int = 5 # Realm tok lowerCAmelCase : Union[str, Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''test''', '''question''', '''this''', '''is''', '''the''', '''first''', '''second''', '''third''', '''fourth''', '''fifth''', '''record''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase : Any = os.path.join(self.tmpdirname , '''realm_tokenizer''' ) os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) lowerCAmelCase : Tuple = os.path.join(UpperCamelCase_ , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowerCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , '''realm_block_records''' ) os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) ) def lowerCamelCase__ ( self : Union[str, Any] ): shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Tuple = RealmConfig(num_block_records=self.num_block_records ) return config def lowerCamelCase__ ( self : int ): lowerCAmelCase : Dict = Dataset.from_dict( { '''id''': ['''0''', '''1'''], '''question''': ['''foo''', '''bar'''], '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']], } ) return dataset def lowerCamelCase__ ( self : int ): lowerCAmelCase : Optional[int] = np.array( [ b'''This is the first record''', b'''This is the second record''', b'''This is the third record''', b'''This is the fourth record''', b'''This is the fifth record''', b'''This is a longer longer longer record''', ] , dtype=UpperCamelCase_ , ) return block_records def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : str = RealmRetriever( block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , ) return retriever def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : int = self.get_config() lowerCAmelCase : Tuple = self.get_dummy_retriever() lowerCAmelCase : int = retriever.tokenizer lowerCAmelCase : Any = np.array([0, 3] , dtype='''long''' ) lowerCAmelCase : Optional[Any] = tokenizer(['''Test question'''] ).input_ids lowerCAmelCase : List[Any] = tokenizer( ['''the fourth'''] , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ).input_ids lowerCAmelCase : Tuple = config.reader_seq_len lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = retriever( UpperCamelCase_ , UpperCamelCase_ , answer_ids=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''np''' ) self.assertEqual(len(UpperCamelCase_ ) , 2 ) self.assertEqual(len(UpperCamelCase_ ) , 2 ) self.assertEqual(len(UpperCamelCase_ ) , 2 ) self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) ) 
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) ) self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) ) self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , ) self.assertEqual( tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : List[Any] = self.get_config() lowerCAmelCase : Optional[int] = self.get_dummy_retriever() lowerCAmelCase : Dict = retriever.tokenizer lowerCAmelCase : str = np.array([0, 3, 5] , dtype='''long''' ) lowerCAmelCase : List[str] = tokenizer(['''Test question'''] ).input_ids lowerCAmelCase : int = tokenizer( ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ).input_ids lowerCAmelCase : Union[str, Any] = config.reader_seq_len lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = retriever( UpperCamelCase_ , UpperCamelCase_ , answer_ids=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''np''' ) self.assertEqual([False, True, True] , UpperCamelCase_ ) self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCamelCase_ ) self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[int] = self.get_dummy_retriever() retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) # Test local path lowerCAmelCase : Any = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' ) # Test mocked remote path with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download: lowerCAmelCase : Optional[int] = os.path.join( os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME ) lowerCAmelCase : Union[str, Any] = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' ) self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
637
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_( a__ ): __UpperCamelCase = '''philschmid/bart-large-cnn-samsum''' __UpperCamelCase = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) __UpperCamelCase = '''summarizer''' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = ['''text'''] __UpperCamelCase = ['''text'''] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ): return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ): return self.model.generate(**UpperCamelCase_ )[0] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ): return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
637
1
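For context, a hedged sketch of the encode, forward, decode flow the summarization tool above wires together, written against the public transformers API; it assumes network access to download the same checkpoint the tool names.

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "philschmid/bart-large-cnn-samsum"  # checkpoint named by the tool above
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

# encode -> generate -> decode, mirroring the tool's three methods
inputs = tokenizer("A long English text to summarize ...", return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs)[0]
print(tokenizer.decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True))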
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = LayoutLMTokenizer __UpperCamelCase = LayoutLMTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def lowerCamelCase__ ( self : int ): super().setUp() lowerCAmelCase : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def lowerCamelCase__ ( self : int , **UpperCamelCase_ : int ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Any ): lowerCAmelCase : Any = '''UNwant\u00E9d,running''' lowerCAmelCase : Union[str, Any] = '''unwanted, running''' return input_text, output_text def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file ) lowerCAmelCase : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [7, 4, 5, 1_0, 8, 9] ) def lowerCamelCase__ ( self : List[Any] ): pass
637
"""simple docstring""" snake_case__ : List[Any] = '''Tobias Carryer''' from time import time class snake_case_: def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008 lowerCAmelCase : str = multiplier lowerCAmelCase : Optional[int] = increment lowerCAmelCase : Optional[Any] = modulo lowerCAmelCase : Optional[Any] = seed def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31) while True: print(lcg.next_number())
637
1
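A short sketch of the linear congruential recurrence the generator class above implements, X_{n+1} = (a * X_n + c) mod m, with the same constants as the demo (note that 2 << 31 == 2**32); the seed of 42 is an arbitrary illustrative choice.

a, c, m = 1_664_525, 1_013_904_223, 2 << 31  # same multiplier, increment, modulo as above
seed = 42  # arbitrary seed for illustration
values = []
for _ in range(5):
    seed = (a * seed + c) % m  # X_{n+1} = (a * X_n + c) mod m
    values.append(seed)
print(values)  # deterministic: the same seed always yields the same sequence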
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ): lowerCAmelCase : Dict = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ): lowerCAmelCase : Optional[int] = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _snake_case ) lowerCAmelCase : List[Any] = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Any = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , ) for _ in range(1_0_0_0 ): lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class snake_case_( unittest.TestCase ): __UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None __UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __UpperCamelCase = 10 def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0} # schedulers dict format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase : Optional[Any] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' ) class snake_case_: def __init__( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = fn def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ): return self.fn(*UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
637
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: snake_case__ : Optional[Any] = None snake_case__ : Union[str, Any] = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Any = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } snake_case__ : int = { '''google/bigbird-roberta-base''': 4_096, '''google/bigbird-roberta-large''': 4_096, '''google/bigbird-base-trivia-itc''': 4_096, } snake_case__ : Optional[Any] = '''▁''' class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BigBirdTokenizer __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = [] def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = vocab_file lowerCAmelCase : Optional[int] = False if not self.vocab_file else True def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : str = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Tuple = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
637
1
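A toy illustration, with hypothetical token ids, of the layouts the BigBird methods above build: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair, and a special-tokens mask that flags only the added positions.

cls, sep = [65], [66]  # hypothetical [CLS] and [SEP] ids
ids_a, ids_b = [1, 2, 3], [4, 5]

print(cls + ids_a + sep)                # [65, 1, 2, 3, 66]
print(cls + ids_a + sep + ids_b + sep)  # [65, 1, 2, 3, 66, 4, 5, 66]
print([1] + [0] * len(ids_a) + [1])     # [1, 0, 0, 0, 1]; 1 marks the special tokens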
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def _snake_case ( ): lowerCAmelCase : Dict = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=_snake_case , default=_snake_case , required=_snake_case , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=_snake_case , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=_snake_case , default=4 , help='''How much images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=_snake_case , default=42 , help='''Seed for random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=_snake_case , default=0 , help='''cuda_id.''' , ) lowerCAmelCase : List[str] = parser.parse_args() return args def _snake_case ( _snake_case : Any , _snake_case : Any , _snake_case : Any ): if not len(_snake_case ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowerCAmelCase, lowerCAmelCase : List[Any] = imgs[0].size lowerCAmelCase : Any = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowerCAmelCase, lowerCAmelCase : List[str] = grid.size for i, img in enumerate(_snake_case ): grid.paste(_snake_case , box=(i % cols * w, i // cols * h) ) return grid def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Union[str, Any]="robotic cat with wings" , _snake_case : Dict=7.5 , _snake_case : Union[str, Any]=50 , _snake_case : Dict=1 , _snake_case : Union[str, Any]=42 , ): lowerCAmelCase : List[Any] = torch.Generator(pipeline.device ).manual_seed(_snake_case ) lowerCAmelCase : Union[str, Any] = pipeline( _snake_case , guidance_scale=_snake_case , num_inference_steps=_snake_case , generator=_snake_case , num_images_per_prompt=_snake_case , ).images lowerCAmelCase : Optional[int] = int(math.sqrt(_snake_case ) ) lowerCAmelCase : int = image_grid(_snake_case , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images snake_case__ : Union[str, Any] = parse_args() # Load models and create wrapper for stable diffusion snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='''tokenizer''') snake_case__ : List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''text_encoder''') snake_case__ : int = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='''vae''') snake_case__ : Dict = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='''unet''') snake_case__ : List[Any] = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) snake_case__ : Optional[int] = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, '''best_model.pt''')): snake_case__ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, '''unet''', unet) else: snake_case__ : str = unet.to(torch.device('''cuda''', args.cuda_id)) snake_case__ : Union[str, Any] = pipeline.to(unet.device) snake_case__ , snake_case__ : List[Any] = 
generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, '''{}.png'''.format('''_'''.join(args.caption.split())))) snake_case__ : Tuple = os.path.join(args.pretrained_model_name_or_path, '''_'''.join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
637
"""simple docstring""" # using dfs for finding eulerian path traversal def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ): lowerCAmelCase : Any = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) return path def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ): lowerCAmelCase : Tuple = 0 lowerCAmelCase : Optional[Any] = -1 for i in range(_snake_case ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 lowerCAmelCase : Optional[Any] = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ): lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case ) if check == 3: print('''graph is not Eulerian''' ) print('''no path''' ) return lowerCAmelCase : Dict = 1 if check == 2: lowerCAmelCase : int = odd_node print('''graph has a Euler path''' ) if check == 1: print('''graph has a Euler cycle''' ) lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case ) print(_snake_case ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]} lowerCAmelCase : Any = { 1: [], 2: [] # all degree is zero } lowerCAmelCase : List[str] = 10 check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) if __name__ == "__main__": main()
637
1
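The check_circuit_or_path helper above relies on the degree-parity rule: a connected undirected graph has an Euler circuit iff every vertex has even degree, and an Euler path iff exactly two vertices have odd degree. A standalone sketch of that check on the first demo graph:

graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}  # G1 from main() above
odd = [v for v, neighbors in graph.items() if len(neighbors) % 2 == 1]
if not odd:
    print("Euler circuit exists")
elif len(odd) == 2:
    print(f"Euler path exists; it must start at vertex {odd[0]} or {odd[1]}")  # vertices 1 and 5
else:
    print("neither an Euler path nor an Euler circuit exists")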
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case__ : Optional[Any] = { '''configuration_blenderbot''': [ '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotConfig''', '''BlenderbotOnnxConfig''', ], '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[Any] = ['''BlenderbotTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Union[str, Any] = [ '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotForCausalLM''', '''BlenderbotForConditionalGeneration''', '''BlenderbotModel''', '''BlenderbotPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = [ '''TFBlenderbotForConditionalGeneration''', '''TFBlenderbotModel''', '''TFBlenderbotPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''FlaxBlenderbotForConditionalGeneration''', '''FlaxBlenderbotModel''', '''FlaxBlenderbotPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys snake_case__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
637
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[Any] = 0 @slow def lowerCamelCase__ ( self : Dict ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) # Check that tokenizer_type ≠ model_type lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Any ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , 
'''merges.txt''' ) ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) ) lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): with pytest.raises(UpperCamelCase_ ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowerCamelCase__ ( self : str ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ ) else: self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowerCamelCase__ ( self : Tuple ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values() lowerCAmelCase : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Any ): self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = '''Hello, world. 
How are you?''' lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowerCamelCase__ ( self : int ): lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): # Check we can load the tokenizer config of an online model. lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' ) lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ ) self.assertDictEqual(UpperCamelCase_ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowerCamelCase__ ( self : Optional[int] ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowerCamelCase__ ( self : str ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) # Can register in two steps AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ ) bert_tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): class snake_case_( a__ ): __UpperCamelCase = False class snake_case_( a__ ): __UpperCamelCase = NewTokenizer __UpperCamelCase = False try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # If remote code is not set, the default is to use local lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowerCamelCase__ ( self : str ): with self.assertRaisesRegex( UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' ) def lowerCamelCase__ ( self : int ): with self.assertRaisesRegex( UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' ) def lowerCamelCase__ ( self : Optional[int] ): # Make sure we have cached the tokenizer. lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
637
1
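A hedged sketch of the registration pattern the AutoTokenizer tests above exercise, using only the public AutoConfig.register / AutoTokenizer.register API; MyConfig and MyTokenizer are illustrative stand-ins, not the test fixtures.

from transformers import AutoConfig, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer

class MyConfig(PretrainedConfig):
    model_type = "my-model"  # key under which the config is registered

class MyTokenizer(PreTrainedTokenizer):
    pass  # a real tokenizer would implement the vocabulary methods

AutoConfig.register("my-model", MyConfig)
AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
# AutoTokenizer can now resolve checkpoints whose config declares
# model_type == "my-model"; registering an already-known type raises a ValueError.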
"""simple docstring""" import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = PhobertTokenizer __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[int] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowerCAmelCase : str = ['''T@@''', '''i''', '''I''', '''R@@''', '''r''', '''e@@'''] lowerCAmelCase : Dict = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCAmelCase : Tuple = ['''#version: 0.2''', '''l à</w>'''] lowerCAmelCase : Any = {'''unk_token''': '''<unk>'''} lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(F'''{token} {vocab_tokens[token]}\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[str] , **UpperCamelCase_ : Tuple ): kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Union[str, Any] = '''Tôi là VinAI Research''' lowerCAmelCase : str = '''T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>''' return input_text, output_text def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[int] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowerCAmelCase : int = '''Tôi là VinAI Research''' lowerCAmelCase : Any = '''T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'''.split() lowerCAmelCase : Tuple = tokenizer.tokenize(UpperCamelCase_ ) print(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Dict = tokens + [tokenizer.unk_token] lowerCAmelCase : Tuple = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , UpperCamelCase_ )
637
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } snake_case__ : List[Any] = { '''allenai/led-base-16384''': 16_384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _snake_case ( ): lowerCAmelCase : Optional[int] = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowerCAmelCase : str = bs[:] lowerCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(_snake_case ) cs.append(2**8 + n ) n += 1 lowerCAmelCase : int = [chr(_snake_case ) for n in cs] return dict(zip(_snake_case , _snake_case ) ) def _snake_case ( _snake_case : List[Any] ): lowerCAmelCase : List[str] = set() lowerCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase : Optional[Any] = char return pairs class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ): lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a 
normal word, i.e. include the space before it lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: lowerCAmelCase : Any = json.load(UpperCamelCase_ ) lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()} lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding lowerCAmelCase : List[Any] = bytes_to_unicode() lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1] lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCAmelCase : List[Any] = {} lowerCAmelCase : Optional[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase__ ( self : Union[str, Any] ): return len(self.encoder ) def lowerCamelCase__ ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ): if token in self.cache: return self.cache[token] lowerCAmelCase : List[str] = tuple(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase, lowerCAmelCase : Any = bigram lowerCAmelCase : Tuple = [] lowerCAmelCase : Any = 0 while i < len(UpperCamelCase_ ): try: lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase : int = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase : Tuple = tuple(UpperCamelCase_ ) lowerCAmelCase : Tuple = new_word if len(UpperCamelCase_ ) == 1: break else: lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ ) lowerCAmelCase : List[str] = word return word def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Dict = [] for token in re.findall(self.pat , UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) ) return bpe_tokens def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ): return 
self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ): return self.decoder.get(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : int = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) lowerCAmelCase : Optional[int] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCAmelCase : Tuple = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] lowerCAmelCase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ): lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): lowerCAmelCase : List[Any] = ''' ''' + text return (text, kwargs) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ 
: Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Dict = super()._pad( encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ) # Load from model defaults if return_attention_mask is None: lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ ) if needs_to_be_padded: lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase : Dict = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase : int = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
637
1
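# A minimal standalone sketch of the global-attention padding rule implemented
# in the tokenizer's _pad override above. The helper name and signature are
# illustrative assumptions; only the convention (0 = local attention,
# 1 = global attention, -1 = padding, so padded positions are neither)
# comes from the code above.
def pad_global_attention_mask(
    global_mask: list, target_len: int, padding_side: str = "right"
) -> list:
    difference = target_len - len(global_mask)
    if difference <= 0:
        return global_mask
    if padding_side == "right":
        return global_mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + global_mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))


assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0], 4, "left") == [-1, -1, 1, 0]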
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.local_sgd import LocalSGD ######################################################################## # This is a fully working simple example to use Accelerate # with LocalSGD, which is a method to synchronize model # parameters every K batches. It is different, but complementary # to gradient accumulation. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ : int = 16 snake_case__ : Any = 32 def _snake_case ( _snake_case : Accelerator , _snake_case : int = 16 ): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_snake_case : Union[str, Any] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_snake_case , max_length=_snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase : Union[str, Any] = datasets.map( _snake_case , batched=_snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase : List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_snake_case : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase : Optional[Any] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase : int = 8 else: lowerCAmelCase : Optional[int] = None return tokenizer.pad( _snake_case , padding='''longest''' , max_length=_snake_case , pad_to_multiple_of=_snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCAmelCase : Optional[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case ) lowerCAmelCase : Optional[int] = DataLoader( tokenized_datasets['''validation'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case__ : str = mocked_dataloaders # noqa: F811 def _snake_case ( _snake_case : Any , _snake_case : Optional[int] ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _snake_case ) == "1": lowerCAmelCase : List[str] = 2 # New Code # lowerCAmelCase : Optional[int] = int(args.gradient_accumulation_steps ) lowerCAmelCase : str = int(args.local_sgd_steps ) # Initialize accelerator lowerCAmelCase : List[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_snake_case ) if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]: raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase : Dict = config['''lr'''] lowerCAmelCase : List[Any] = int(config['''num_epochs'''] ) lowerCAmelCase : List[Any] = int(config['''seed'''] ) lowerCAmelCase : int = int(config['''batch_size'''] ) lowerCAmelCase : str = evaluate.load('''glue''' , '''mrpc''' ) set_seed(_snake_case ) lowerCAmelCase, lowerCAmelCase : int = get_dataloaders(_snake_case , _snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase : Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase : str = AdamW(params=model.parameters() , lr=_snake_case ) # Instantiate scheduler lowerCAmelCase : List[Any] = get_linear_schedule_with_warmup( optimizer=_snake_case , num_warmup_steps=100 , num_training_steps=(len(_snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : int = accelerator.prepare( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # Now we train the model for epoch in range(_snake_case ): model.train() with LocalSGD( accelerator=_snake_case , model=_snake_case , local_sgd_steps=_snake_case , enabled=local_sgd_steps is not None ) as local_sgd: for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(_snake_case ): lowerCAmelCase : Tuple = model(**_snake_case ) lowerCAmelCase : int = output.loss accelerator.backward(_snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # LocalSGD-specific line local_sgd.step() model.eval() for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase : Tuple = model(**_snake_case ) lowerCAmelCase : Dict = outputs.logits.argmax(dim=-1 ) lowerCAmelCase, lowerCAmelCase : Tuple = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_snake_case , references=_snake_case , ) lowerCAmelCase : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _snake_case ) def _snake_case ( ): lowerCAmelCase : Dict = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_snake_case , default=_snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=_snake_case , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument( '''--local_sgd_steps''' , type=_snake_case , default=8 , help='''Number of local SGD steps or None to disable local SGD''' ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowerCAmelCase : int = parser.parse_args() lowerCAmelCase : Optional[Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_snake_case , _snake_case ) if __name__ == "__main__": main()
637
"""simple docstring""" def _snake_case ( _snake_case : int = 4000000 ): lowerCAmelCase : int = [0, 1] lowerCAmelCase : List[str] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 lowerCAmelCase : int = 0 for j in range(len(_snake_case ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f"""{solution() = }""")
637
1
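# Aside on the even-Fibonacci sum solution above: every third Fibonacci number
# is even (1, 1, 2, 3, 5, 8, ...), and the even ones satisfy
# E(k) = 4 * E(k-1) + E(k-2), so the parity test can be skipped entirely.
# A self-contained sketch (function and variable names are mine):
def even_fib_sum(limit: int = 4000000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total


assert even_fib_sum() == 4613732  # agrees with the straightforward version above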
"""simple docstring""" import re def _snake_case ( _snake_case : str ): return [char.split() for char in re.split(r'''[^ a-z A-Z 0-9 \s]''' , str_ )] def _snake_case ( _snake_case : str ): lowerCAmelCase : Optional[int] = split_input(str_ ) return "".join( [''''''.join([char.capitalize() for char in sub_str] ) for sub_str in string_split] ) def _snake_case ( _snake_case : str , _snake_case : bool , _snake_case : str ): try: lowerCAmelCase : Optional[int] = split_input(_snake_case ) if upper: lowerCAmelCase : Tuple = ''''''.join( [ separator.join([char.upper() for char in sub_str] ) for sub_str in string_split ] ) else: lowerCAmelCase : str = ''''''.join( [ separator.join([char.lower() for char in sub_str] ) for sub_str in string_split ] ) return res_str except IndexError: return "not valid string" def _snake_case ( _snake_case : str ): return to_simple_case(_snake_case ) def _snake_case ( _snake_case : str ): try: lowerCAmelCase : int = to_simple_case(_snake_case ) return res_str[0].lower() + res_str[1:] except IndexError: return "not valid string" def _snake_case ( _snake_case : str , _snake_case : bool ): return to_complex_case(_snake_case , _snake_case , '''_''' ) def _snake_case ( _snake_case : str , _snake_case : bool ): return to_complex_case(_snake_case , _snake_case , '''-''' ) if __name__ == "__main__": __import__('''doctest''').testmod()
637
"""simple docstring""" def _snake_case ( _snake_case : float , _snake_case : list[float] ): if discount_rate < 0: raise ValueError('''Discount rate cannot be negative''' ) if not cash_flows: raise ValueError('''Cash flows list cannot be empty''' ) lowerCAmelCase : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) ) return round(_snake_case , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
637
1
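# Quick usage check for the present-value helper above (assumes the
# present_value function restored above is in scope; values verified by hand):
# 100 received today plus 100 received one period out, discounted at 10%,
# is 100 + 100 / 1.1 = 190.909..., which rounds to 190.91.
assert present_value(0.10, [100.0, 100.0]) == 190.91
assert present_value(0.0, [50.0, 50.0]) == 100.0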
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case_: def __init__( self : Union[str, Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : float = 0 ): lowerCAmelCase, lowerCAmelCase : List[str] = row, column lowerCAmelCase : List[str] = [[default_value for c in range(UpperCamelCase_ )] for r in range(UpperCamelCase_ )] def __str__( self : str ): lowerCAmelCase : List[str] = F'''Matrix consist of {self.row} rows and {self.column} columns\n''' # Make string identifier lowerCAmelCase : Dict = 0 for row_vector in self.array: for obj in row_vector: lowerCAmelCase : Dict = max(UpperCamelCase_ , len(str(UpperCamelCase_ ) ) ) lowerCAmelCase : Optional[int] = F'''%{max_element_length}s''' # Make string and return def single_line(UpperCamelCase_ : list[float] ) -> str: nonlocal string_format_identifier lowerCAmelCase : List[str] = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(UpperCamelCase_ ) for row_vector in self.array ) return s def __repr__( self : Optional[int] ): return str(self ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : tuple[int, int] ): if not (isinstance(UpperCamelCase_ , (list, tuple) ) and len(UpperCamelCase_ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : str , UpperCamelCase_ : tuple[int, int] ): assert self.validate_indicies(UpperCamelCase_ ) return self.array[loc[0]][loc[1]] def __setitem__( self : Any , UpperCamelCase_ : tuple[int, int] , UpperCamelCase_ : float ): assert self.validate_indicies(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = value def __add__( self : str , UpperCamelCase_ : Matrix ): assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert self.row == another.row and self.column == another.column # Add lowerCAmelCase : Dict = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): lowerCAmelCase : Optional[int] = self[r, c] + another[r, c] return result def __neg__( self : str ): lowerCAmelCase : List[str] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): lowerCAmelCase : Any = -self[r, c] return result def __sub__( self : Dict , UpperCamelCase_ : Matrix ): return self + (-another) def __mul__( self : str , UpperCamelCase_ : int | float | Matrix ): if isinstance(UpperCamelCase_ , (int, float) ): # Scalar multiplication lowerCAmelCase : Optional[int] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): lowerCAmelCase : Tuple = self[r, c] * another return result elif isinstance(UpperCamelCase_ , UpperCamelCase_ ): # Matrix multiplication assert self.column == another.row lowerCAmelCase : int = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: lowerCAmelCase : Dict = F'''Unsupported type given for another ({type(UpperCamelCase_ )})''' raise TypeError(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[int] = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): lowerCAmelCase : List[str] = self[r, c] return result def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Matrix , UpperCamelCase_ : Matrix ): assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) and 
isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert self.row == self.column == u.row == v.row # u, v should have the same number of rows as a assert u.column == v.column == 1 # u, v should be column vectors # Calculate v_t = v.transpose() numerator_factor = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # it's not invertible return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def testa( ): # a^(-1) ainv = Matrix(3 , 3 , 0 ) for i in range(3 ): ainv[i, i] = 1 print(f'''a^(-1) is {ainv}''' ) # u, v u = Matrix(3 , 1 , 0 ) u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3 v = Matrix(3 , 1 , 0 ) v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5 print(f'''u is {u}''' ) print(f'''v is {v}''' ) print(f'''uv^T is {u * v.transpose()}''' ) # Sherman Morrison print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(u , v )}''' ) def testb( ): import doctest doctest.testmod() testa()
637
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : list[int] , _snake_case : int ): if len(_snake_case ) == 0: return False lowerCAmelCase : List[Any] = len(_snake_case ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , _snake_case ) else: return binary_search(a_list[midpoint + 1 :] , _snake_case ) if __name__ == "__main__": snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip() snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')] snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip()) snake_case__ : str = '''''' if binary_search(sequence, target) else '''not ''' print(f"""{target} was {not_str}found in {sequence}""")
637
1
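# Numerical check (an aside, NumPy only) of the identity the sherman_morrison
# method above implements, where the Matrix instance plays the role of A^(-1):
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# The concrete A, u, v below are arbitrary illustrative values.
import numpy as np

A = np.array([[4.0, 1.0, 0.0], [1.0, 3.0, 0.0], [0.0, 0.0, 2.0]])
u = np.array([[1.0], [0.0], [2.0]])
v = np.array([[0.0], [1.0], [1.0]])
Ainv = np.linalg.inv(A)
denom = 1.0 + (v.T @ Ainv @ u).item()  # the "numerator_factor" in the method
sm = Ainv - (Ainv @ u @ v.T @ Ainv) / denom
assert np.allclose(sm, np.linalg.inv(A + u @ v.T))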
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : Union[str, Any] = { '''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''', } class snake_case_( a__ ): __UpperCamelCase = '''data2vec-text''' def __init__( self : int , UpperCamelCase_ : Optional[int]=3_0_5_2_2 , UpperCamelCase_ : List[Any]=7_6_8 , UpperCamelCase_ : Any=1_2 , UpperCamelCase_ : Optional[Any]=1_2 , UpperCamelCase_ : Union[str, Any]=3_0_7_2 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : int=0.1 , UpperCamelCase_ : Tuple=5_1_2 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : int=0.02 , UpperCamelCase_ : int=1E-12 , UpperCamelCase_ : str=1 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : List[Any]="absolute" , UpperCamelCase_ : Any=True , UpperCamelCase_ : Any=None , **UpperCamelCase_ : List[Any] , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : str = vocab_size lowerCAmelCase : Union[str, Any] = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Union[str, Any] = hidden_act lowerCAmelCase : List[str] = intermediate_size lowerCAmelCase : Tuple = hidden_dropout_prob lowerCAmelCase : Dict = attention_probs_dropout_prob lowerCAmelCase : Optional[int] = max_position_embeddings lowerCAmelCase : Any = type_vocab_size lowerCAmelCase : str = initializer_range lowerCAmelCase : List[str] = layer_norm_eps lowerCAmelCase : str = position_embedding_type lowerCAmelCase : Union[str, Any] = use_cache lowerCAmelCase : List[str] = classifier_dropout class snake_case_( a__ ): @property def lowerCamelCase__ ( self : Optional[Any] ): if self.task == "multiple-choice": lowerCAmelCase : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: lowerCAmelCase : Any = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
637
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict snake_case__ : Optional[Any] = namedtuple( '''_TestCommandArgs''', [ '''dataset''', '''name''', '''cache_dir''', '''data_dir''', '''all_configs''', '''save_infos''', '''ignore_verifications''', '''force_redownload''', '''clear_cache''', ], defaults=[None, None, None, False, False, False, False, False], ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ): return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def _snake_case ( _snake_case : Any ): lowerCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case ) lowerCAmelCase : str = TestCommand(*_snake_case ) test_command.run() lowerCAmelCase : str = os.path.join(_snake_case , '''README.md''' ) assert os.path.exists(_snake_case ) lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(_snake_case ) lowerCAmelCase : List[str] = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = getattr(dataset_infos['''default'''] , _snake_case ), getattr(expected_dataset_infos['''default'''] , _snake_case ) if key == "num_bytes": assert is_apercent_close(_snake_case , _snake_case ) elif key == "splits": assert list(_snake_case ) == list(_snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
637
1
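# The 1%-relative-tolerance comparison used by the dataset test above, pulled
# out as a standalone helper (the helper name is mine; the original takes
# source and target and checks abs(source - target) / target < 0.01):
def within_one_percent(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01


assert within_one_percent(2351563, 2355000)  # ~0.15% apart -> close enough
assert not within_one_percent(100, 150)      # ~33% apart -> not close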
"""simple docstring""" def _snake_case ( _snake_case : str , _snake_case : str ): lowerCAmelCase : int = len(_snake_case ) lowerCAmelCase : int = len(_snake_case ) lowerCAmelCase : int = ( first_str_length if first_str_length > second_str_length else second_str_length ) lowerCAmelCase : list = [] for char_count in range(_snake_case ): if char_count < first_str_length: output_list.append(first_str[char_count] ) if char_count < second_str_length: output_list.append(second_str[char_count] ) return "".join(_snake_case ) if __name__ == "__main__": print(alternative_string_arrange('''AB''', '''XYZ'''), end=''' ''')
637
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ): return base * power(_snake_case , (exponent - 1) ) if exponent else 1 if __name__ == "__main__": print('''Raise base to the power of exponent using recursion...''') snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip()) snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip()) snake_case__ : Any = power(base, abs(exponent)) if exponent < 0: # power() does not properly deal w/ negative exponents snake_case__ : Dict = 1 / result print(f"""{base} to the power of {exponent} is {result}""")
637
1
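# Usage note for power() above: the recursion assumes a non-negative exponent,
# which is why the driver calls power(base, abs(exponent)) and inverts the
# result afterwards. The same two steps as one helper (illustrative; assumes
# the power function restored above is in scope):
def signed_power(base: int, exponent: int) -> float:
    result = power(base, abs(exponent))
    return 1 / result if exponent < 0 else result


assert signed_power(2, -3) == 0.125
assert signed_power(3, 4) == 81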
"""simple docstring""" from __future__ import annotations class snake_case_: def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self : Dict ): # searches pattern in text and returns index positions lowerCAmelCase : Union[str, Any] = [] for i in range(self.textLen - self.patLen + 1 ): lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ ) if mismatch_index == -1: positions.append(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] ) lowerCAmelCase : int = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions snake_case__ : str = '''ABAABA''' snake_case__ : List[str] = '''AB''' snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern) snake_case__ : Optional[Any] = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
637
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : int = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ): if attention_mask is None: lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class snake_case_: def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ): lowerCAmelCase : Tuple = parent lowerCAmelCase : str = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : Optional[int] = is_training lowerCAmelCase : int = use_labels lowerCAmelCase : List[Any] = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : List[Any] = num_hidden_layers lowerCAmelCase : Any = num_attention_heads lowerCAmelCase : List[Any] = intermediate_size lowerCAmelCase : Optional[int] = hidden_act lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : Optional[int] = attention_probs_dropout_prob lowerCAmelCase : List[Any] = max_position_embeddings lowerCAmelCase : Union[str, Any] = eos_token_id lowerCAmelCase : Dict = pad_token_id lowerCAmelCase : Optional[Any] = bos_token_id lowerCAmelCase : List[str] = initializer_range def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) 
, -1 ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Union[str, Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def lowerCamelCase__ ( self : str ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ): lowerCAmelCase : int = 2_0 lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCAmelCase : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : List[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : List[str] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = 2_0 lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) 
lowerCAmelCase : Dict = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ ) lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class snake_case_( unittest.TestCase ): __UpperCamelCase = 99 def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase : List[Any] = input_ids.shape[0] lowerCAmelCase : Optional[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data() lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ ) lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ) lowerCAmelCase : str = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) 
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class snake_case_( a__ , unittest.TestCase , a__ ): __UpperCamelCase = True __UpperCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Any = FlaxBlenderbotModelTester(self ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) @jax.jit def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ): return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCAmelCase : List[Any] = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): return model.decode( decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Optional[int] ): for model_class_name in self.all_model_classes: lowerCAmelCase : Optional[int] = 
model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase : List[str] = model(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5} lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) lowerCAmelCase : List[Any] = ['''Sam'''] lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' ) lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.''' lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ ) assert generated_txt[0].strip() == tgt_text
637
1
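# Sanity check for the bad-character search above: "AB" occurs in "ABAABA" at
# indices 0 and 3, which a naive scan confirms (self-contained; the variable
# names below are mine):
text, pattern = "ABAABA", "AB"
naive_positions = [
    i
    for i in range(len(text) - len(pattern) + 1)
    if text[i : i + len(pattern)] == pattern
]
assert naive_positions == [0, 3]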
"""simple docstring""" from string import ascii_uppercase snake_case__ : Union[str, Any] = {str(ord(c) - 55): c for c in ascii_uppercase} def _snake_case ( _snake_case : int , _snake_case : int ): if isinstance(_snake_case , _snake_case ): raise TypeError('''int() can\'t convert non-string with explicit base''' ) if num < 0: raise ValueError('''parameter must be positive int''' ) if isinstance(_snake_case , _snake_case ): raise TypeError('''\'str\' object cannot be interpreted as an integer''' ) if isinstance(_snake_case , _snake_case ): raise TypeError('''\'float\' object cannot be interpreted as an integer''' ) if base in (0, 1): raise ValueError('''base must be >= 2''' ) if base > 36: raise ValueError('''base must be <= 36''' ) lowerCAmelCase : Optional[Any] = '''''' lowerCAmelCase : Union[str, Any] = 0 lowerCAmelCase : Dict = 0 while div != 1: lowerCAmelCase, lowerCAmelCase : Tuple = divmod(_snake_case , _snake_case ) if base >= 11 and 9 < mod < 36: lowerCAmelCase : int = ALPHABET_VALUES[str(_snake_case )] else: lowerCAmelCase : int = str(_snake_case ) new_value += actual_value lowerCAmelCase : List[str] = num // base lowerCAmelCase : List[Any] = div if div == 0: return str(new_value[::-1] ) elif div == 1: new_value += str(_snake_case ) return str(new_value[::-1] ) return new_value[::-1] if __name__ == "__main__": import doctest doctest.testmod() for base in range(2, 37): for num in range(1_000): assert int(decimal_to_any(num, base), base) == num, ( num, base, decimal_to_any(num, base), int(decimal_to_any(num, base), base), )
637
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example snake_case__ : int = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def _snake_case ( _snake_case : list[list[int]] ): lowerCAmelCase : Union[str, Any] = [] for i in range(len(_snake_case ) ): lowerCAmelCase : Any = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_snake_case ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_snake_case ) - 1: neighbour_count += cells[i + 1][j] if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCAmelCase : str = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_snake_case ) return next_generation def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ): lowerCAmelCase : int = [] for _ in range(_snake_case ): # Create output image lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) ) lowerCAmelCase : Union[str, Any] = img.load() # Save cells to image for x in range(len(_snake_case ) ): for y in range(len(cells[0] ) ): lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255 lowerCAmelCase : List[Any] = (colour, colour, colour) # Save image images.append(_snake_case ) lowerCAmelCase : Union[str, Any] = new_generation(_snake_case ) return images if __name__ == "__main__": snake_case__ : Union[str, Any] = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
637
1
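# A compact converter equivalent in spirit to the decimal_to_any routine above
# (helper name and digit table are my assumptions), cross-checked against
# Python's built-in int(..., base):
def to_base(num: int, base: int) -> str:
    if num == 0:
        return "0"
    digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    out = []
    while num:
        num, mod = divmod(num, base)
        out.append(digits[mod])
    return "".join(reversed(out))


assert to_base(255, 16) == "FF"
assert to_base(5, 2) == "101"
assert int(to_base(12345, 36), 36) == 12345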
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class snake_case_: def __init__( self : int , UpperCamelCase_ : str = "cpu" , UpperCamelCase_ : str = "openai/clip-vit-large-patch14" ): lowerCAmelCase : int = device lowerCAmelCase : Optional[int] = CLIPTokenizerFast.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Tuple = [0.48_145_466, 0.4_578_275, 0.40_821_073] lowerCAmelCase : List[str] = [0.26_862_954, 0.26_130_258, 0.27_577_711] lowerCAmelCase : Dict = torchvision.transforms.Normalize(self.image_mean , self.image_std ) lowerCAmelCase : List[Any] = torchvision.transforms.Resize(2_2_4 ) lowerCAmelCase : str = torchvision.transforms.CenterCrop(2_2_4 ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Optional[Any] = self.resize(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = self.center_crop(UpperCamelCase_ ) lowerCAmelCase : str = self.normalize(UpperCamelCase_ ) return images def __call__( self : str , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[str]=None , **UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Union[str, Any] = self.tokenizer(text=UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = self.preprocess_img(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = {key: value.to(self.device ) for (key, value) in encoding.items()} return encoding class snake_case_( nn.Module ): def __init__( self : str , UpperCamelCase_ : str=1_0 , UpperCamelCase_ : int=0.01 , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : int=None , UpperCamelCase_ : int=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : Any="image" , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Tuple=False , ): super().__init__() lowerCAmelCase : List[Any] = None lowerCAmelCase : Optional[Any] = device if device else get_device() if vqgan: lowerCAmelCase : Optional[int] = vqgan else: lowerCAmelCase : int = load_vqgan(self.device , conf_path=UpperCamelCase_ , ckpt_path=UpperCamelCase_ ) self.vqgan.eval() if clip: lowerCAmelCase : List[str] = clip else: lowerCAmelCase : Dict = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' ) self.clip.to(self.device ) lowerCAmelCase : Tuple = ProcessorGradientFlow(device=self.device ) lowerCAmelCase : int = iterations lowerCAmelCase : Optional[Any] = lr lowerCAmelCase : Dict = log lowerCAmelCase : List[Any] = make_grid lowerCAmelCase : Dict = return_val lowerCAmelCase : Tuple = quantize lowerCAmelCase : Union[str, Any] = self.vqgan.decoder.z_shape def lowerCamelCase__ ( self : str , UpperCamelCase_ : Any=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Tuple=5 , UpperCamelCase_ : List[Any]=True ): lowerCAmelCase : str = [] if output_path is None: lowerCAmelCase : List[Any] = '''./animation.gif''' if input_path is None: lowerCAmelCase : int = self.save_path lowerCAmelCase : Tuple = sorted(glob(input_path + '''/*''' ) ) if not len(UpperCamelCase_ ): raise ValueError( '''No images found in save path, aborting (did you pass save_intermediate=True to 
the generate''' ''' function?)''' ) if len(UpperCamelCase_ ) == 1: print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' ) lowerCAmelCase : int = total_duration / len(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = [frame_duration] * len(UpperCamelCase_ ) if extend_frames: lowerCAmelCase : Union[str, Any] = 1.5 lowerCAmelCase : str = 3 for file_name in paths: if file_name.endswith('''.png''' ): images.append(imageio.imread(UpperCamelCase_ ) ) imageio.mimsave(UpperCamelCase_ , UpperCamelCase_ , duration=UpperCamelCase_ ) print(F'''gif saved to {output_path}''' ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None ): if not (path or img): raise ValueError('''Input either path or tensor''' ) if img is not None: raise NotImplementedError lowerCAmelCase : Dict = preprocess(Image.open(UpperCamelCase_ ) , target_image_size=2_5_6 ).to(self.device ) lowerCAmelCase : str = preprocess_vqgan(UpperCamelCase_ ) lowerCAmelCase, *lowerCAmelCase : Dict = self.vqgan.encode(UpperCamelCase_ ) return z def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[str] = self.latent.detach().requires_grad_() lowerCAmelCase : Optional[Any] = base_latent + transform_vector if self.quantize: lowerCAmelCase, *lowerCAmelCase : str = self.vqgan.quantize(UpperCamelCase_ ) else: lowerCAmelCase : int = trans_latent return self.vqgan.decode(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict=None ): lowerCAmelCase : Dict = self.clip_preprocessor(text=UpperCamelCase_ , images=UpperCamelCase_ , return_tensors='''pt''' , padding=UpperCamelCase_ ) lowerCAmelCase : List[str] = self.clip(**UpperCamelCase_ ) lowerCAmelCase : Tuple = clip_outputs.logits_per_image if weights is not None: lowerCAmelCase : Tuple = similarity_logits * weights return similarity_logits.sum() def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Union[str, Any] = self._get_clip_similarity(pos_prompts['''prompts'''] , UpperCamelCase_ , weights=(1 / pos_prompts['''weights''']) ) if neg_prompts: lowerCAmelCase : Optional[int] = self._get_clip_similarity(neg_prompts['''prompts'''] , UpperCamelCase_ , weights=neg_prompts['''weights'''] ) else: lowerCAmelCase : List[Any] = torch.tensor([1] , device=self.device ) lowerCAmelCase : Tuple = -torch.log(UpperCamelCase_ ) + torch.log(UpperCamelCase_ ) return loss def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ): lowerCAmelCase : List[Any] = torch.randn_like(self.latent , requires_grad=UpperCamelCase_ , device=self.device ) lowerCAmelCase : List[Any] = torch.optim.Adam([vector] , lr=self.lr ) for i in range(self.iterations ): optim.zero_grad() lowerCAmelCase : str = self._add_vector(UpperCamelCase_ ) lowerCAmelCase : int = loop_post_process(UpperCamelCase_ ) lowerCAmelCase : List[Any] = self._get_CLIP_loss(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) print('''CLIP loss''' , UpperCamelCase_ ) if self.log: wandb.log({'''CLIP Loss''': clip_loss} ) clip_loss.backward(retain_graph=UpperCamelCase_ ) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0] ) else: yield vector def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : 
Dict ): wandb.init(reinit=UpperCamelCase_ , project='''face-editor''' ) wandb.config.update({'''Positive Prompts''': positive_prompts} ) wandb.config.update({'''Negative Prompts''': negative_prompts} ) wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} ) if image_path: lowerCAmelCase : Optional[Any] = Image.open(UpperCamelCase_ ) lowerCAmelCase : str = image.resize((2_5_6, 2_5_6) ) wandb.log('''Original Image''' , wandb.Image(UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ): if not prompts: return [] lowerCAmelCase : Tuple = [] lowerCAmelCase : Optional[Any] = [] if isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = [prompt.strip() for prompt in prompts.split('''|''' )] for prompt in prompts: if isinstance(UpperCamelCase_ , (tuple, list) ): lowerCAmelCase : Optional[int] = prompt[0] lowerCAmelCase : Tuple = float(prompt[1] ) elif ":" in prompt: lowerCAmelCase, lowerCAmelCase : Dict = prompt.split(''':''' ) lowerCAmelCase : List[str] = float(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = prompt lowerCAmelCase : List[str] = 1.0 processed_prompts.append(UpperCamelCase_ ) weights.append(UpperCamelCase_ ) return { "prompts": processed_prompts, "weights": torch.tensor(UpperCamelCase_ , device=self.device ), } def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Any=None , ): if image_path: lowerCAmelCase : Optional[int] = self._get_latent(UpperCamelCase_ ) else: lowerCAmelCase : str = torch.randn(self.latent_dim , device=self.device ) if self.log: self._init_logging(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) assert pos_prompts, "You must provide at least one positive prompt." lowerCAmelCase : str = self.process_prompts(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = self.process_prompts(UpperCamelCase_ ) if save_final and save_path is None: lowerCAmelCase : Tuple = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) ) if not os.path.exists(UpperCamelCase_ ): os.makedirs(UpperCamelCase_ ) else: lowerCAmelCase : List[str] = save_path + '''_''' + get_timestamp() os.makedirs(UpperCamelCase_ ) lowerCAmelCase : Any = save_path lowerCAmelCase : List[Any] = self.vqgan.decode(self.latent )[0] if show_intermediate: print('''Original Image''' ) show_pil(custom_to_pil(UpperCamelCase_ ) ) lowerCAmelCase : str = loop_post_process(UpperCamelCase_ ) for iter, transformed_img in enumerate(self._optimize_CLIP(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) ): if show_intermediate: show_pil(UpperCamelCase_ ) if save_intermediate: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) ) if self.log: wandb.log({'''Image''': wandb.Image(UpperCamelCase_ )} ) if show_final: show_pil(UpperCamelCase_ ) if save_final: transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
637
"""simple docstring""" from __future__ import annotations class snake_case_: def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self : Dict ): # searches pattern in text and returns index positions lowerCAmelCase : Union[str, Any] = [] for i in range(self.textLen - self.patLen + 1 ): lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ ) if mismatch_index == -1: positions.append(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] ) lowerCAmelCase : int = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions snake_case__ : str = '''ABAABA''' snake_case__ : List[str] = '''AB''' snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern) snake_case__ : Optional[Any] = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
637
1
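# Minimal sketch of the "prompt:weight" convention parsed by process_prompts
# in the VQGAN-CLIP editor above (standalone; the real method also accepts
# (prompt, weight) tuples and "|"-separated strings, and collects the weights
# into a torch tensor):
def parse_prompt(prompt: str):
    if ":" in prompt:
        text, weight = prompt.split(":")
        return text, float(weight)
    return prompt, 1.0


assert parse_prompt("a happy face:2.5") == ("a happy face", 2.5)
assert parse_prompt("a sad face") == ("a sad face", 1.0)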
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets snake_case__ : Optional[Any] = '''\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } ''' snake_case__ : int = '''\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve ''' snake_case__ : Optional[int] = ''' Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: "c" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric(\'mauve\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_( datasets.Metric ): def lowerCamelCase__ ( self : int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : List[Any]="auto" , UpperCamelCase_ : Dict=-1 , UpperCamelCase_ : Any=0.9 , UpperCamelCase_ : List[Any]=5 , UpperCamelCase_ : str=5_0_0 , UpperCamelCase_ : Union[str, Any]="gpt2-large" , UpperCamelCase_ : List[str]=-1 , UpperCamelCase_ : Optional[Any]=1_0_2_4 , UpperCamelCase_ : Optional[Any]=2_5 , UpperCamelCase_ : List[str]=5 , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[Any]=2_5 , ): lowerCAmelCase : Optional[Any] = compute_mauve( p_text=UpperCamelCase_ , q_text=UpperCamelCase_ , p_features=UpperCamelCase_ , q_features=UpperCamelCase_ , p_tokens=UpperCamelCase_ , q_tokens=UpperCamelCase_ , num_buckets=UpperCamelCase_ , pca_max_data=UpperCamelCase_ , kmeans_explained_var=UpperCamelCase_ , kmeans_num_redo=UpperCamelCase_ , kmeans_max_iter=UpperCamelCase_ , featurize_model_name=UpperCamelCase_ , device_id=UpperCamelCase_ , max_text_length=UpperCamelCase_ , divergence_curve_discretization_size=UpperCamelCase_ , mauve_scaling_factor=UpperCamelCase_ , verbose=UpperCamelCase_ , seed=UpperCamelCase_ , ) return out
637
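A usage sketch for the metric above, mirroring its doctest; the texts are illustrative, and the two extra keyword values are assumptions chosen to keep featurization cheap (both appear in the docstring's option lists).

import datasets

mauve = datasets.load_metric("mauve")
predictions = ["hello there", "general kenobi"]
references = ["hello there", "general kenobi"]
# Identical distributions should yield a MAUVE score near 1.0.
out = mauve.compute(
    predictions=predictions,
    references=references,
    featurize_model_name="gpt2",  # smaller featurizer than the gpt2-large default
    device_id=-1,                 # CPU featurization
)
print(out.mauve)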
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case_( a__ ): pass class snake_case_: def __init__( self : Any , UpperCamelCase_ : Any ): lowerCAmelCase : Any = data lowerCAmelCase : Node | None = None def __iter__( self : int ): lowerCAmelCase : Any = self lowerCAmelCase : Union[str, Any] = [] while node: if node in visited: raise ContainsLoopError visited.append(UpperCamelCase_ ) yield node.data lowerCAmelCase : Optional[int] = node.next_node @property def lowerCamelCase__ ( self : str ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": snake_case__ : Dict = Node(1) snake_case__ : Any = Node(2) snake_case__ : int = Node(3) snake_case__ : Any = Node(4) print(root_node.has_loop) # False snake_case__ : Tuple = root_node.next_node print(root_node.has_loop) # True snake_case__ : List[Any] = Node(5) snake_case__ : int = Node(6) snake_case__ : List[Any] = Node(5) snake_case__ : Dict = Node(6) print(root_node.has_loop) # False snake_case__ : Any = Node(1) print(root_node.has_loop) # False
637
1
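The visited-list check above costs O(n) extra memory and O(n^2) time. A sketch of the standard O(1)-space alternative, Floyd's tortoise-and-hare, assuming the Node class above; this function is not part of the original file.

def has_loop_floyd(head):
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node            # advances one step
        fast = fast.next_node.next_node  # advances two steps
        if slow is fast:                 # the pointers can only meet inside a cycle
            return True
    return False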
"""simple docstring""" import math import sys import cva import numpy as np def _snake_case ( _snake_case : np.ndarray , _snake_case : float ): # For applying gaussian function for each element in matrix. lowerCAmelCase : str = math.sqrt(_snake_case ) lowerCAmelCase : Optional[Any] = 1 / (sigma * math.sqrt(2 * math.pi )) return cons * np.exp(-((img / sigma) ** 2) * 0.5 ) def _snake_case ( _snake_case : np.ndarray , _snake_case : int , _snake_case : int , _snake_case : int ): lowerCAmelCase : int = kernel_size // 2 return img[x - half : x + half + 1, y - half : y + half + 1] def _snake_case ( _snake_case : int , _snake_case : float ): # Creates a gaussian kernel of given dimension. lowerCAmelCase : Optional[Any] = np.zeros((kernel_size, kernel_size) ) for i in range(0 , _snake_case ): for j in range(0 , _snake_case ): lowerCAmelCase : Tuple = math.sqrt( abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 ) return vec_gaussian(_snake_case , _snake_case ) def _snake_case ( _snake_case : np.ndarray , _snake_case : float , _snake_case : float , _snake_case : int , ): lowerCAmelCase : Any = np.zeros(img.shape ) lowerCAmelCase : List[Any] = get_gauss_kernel(_snake_case , _snake_case ) lowerCAmelCase, lowerCAmelCase : int = img.shape for i in range(kernel_size // 2 , size_x - kernel_size // 2 ): for j in range(kernel_size // 2 , size_y - kernel_size // 2 ): lowerCAmelCase : Optional[Any] = get_slice(_snake_case , _snake_case , _snake_case , _snake_case ) lowerCAmelCase : str = img_s - img_s[kernel_size // 2, kernel_size // 2] lowerCAmelCase : Dict = vec_gaussian(_snake_case , _snake_case ) lowerCAmelCase : Tuple = np.multiply(_snake_case , _snake_case ) lowerCAmelCase : Optional[Any] = np.multiply(_snake_case , _snake_case ) lowerCAmelCase : Any = np.sum(_snake_case ) / np.sum(_snake_case ) lowerCAmelCase : List[str] = val return imga def _snake_case ( _snake_case : list ): lowerCAmelCase : List[Any] = args[1] if args[1:] else '''../image_data/lena.jpg''' lowerCAmelCase : List[str] = float(args[2] ) if args[2:] else 1.0 lowerCAmelCase : Dict = float(args[3] ) if args[3:] else 1.0 if args[4:]: lowerCAmelCase : Tuple = int(args[4] ) lowerCAmelCase : Dict = kernel_size + abs(kernel_size % 2 - 1 ) else: lowerCAmelCase : List[Any] = 5 return filename, spatial_variance, intensity_variance, kernel_size if __name__ == "__main__": snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[Any] = parse_args(sys.argv) snake_case__ : List[str] = cva.imread(filename, 0) cva.imshow('''input image''', img) snake_case__ : Tuple = img / 255 snake_case__ : Optional[Any] = out.astype('''float32''') snake_case__ : Tuple = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size) snake_case__ : int = out * 255 snake_case__ : Optional[Any] = np.uinta(out) cva.imshow('''output image''', out) cva.waitKey(0) cva.destroyAllWindows()
637
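A quick self-check for bilateral_filter above, as a sketch on a seeded synthetic image; the sizes and variances are arbitrary, and the function is assumed to be the restored version.

import numpy as np

noisy = (np.ones((32, 32)) * 0.5 + np.random.default_rng(0).normal(0, 0.1, (32, 32))).astype("float32")
smoothed = bilateral_filter(noisy, spatial_variance=1.0, intensity_variance=1.0, kernel_size=5)
# The 2-pixel border is left unfiltered (zeros), so compare the interior only:
# smoothing should reduce the noise's standard deviation there.
inner = slice(2, -2)
assert smoothed[inner, inner].std() < noisy[inner, inner].std()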
"""simple docstring""" from torch import nn class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ): super().__init__() lowerCAmelCase : str = class_size lowerCAmelCase : Dict = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowerCAmelCase : int = self.mlp(UpperCamelCase_ ) return logits
637
1
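A usage sketch for the linear head above; the 768-dimensional hidden size, the five classes, and the batch of pooled states are all illustrative values.

import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden = torch.randn(4, 768)            # e.g. pooled transformer outputs
logits = head(hidden)                   # -> shape (4, 5)
probs = torch.softmax(logits, dim=-1)   # per-class probabilities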
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class snake_case_: __UpperCamelCase = LEDConfig __UpperCamelCase = {} __UpperCamelCase = '''gelu''' def __init__( self : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : List[Any]=7 , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : List[Any]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : int=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : int=3_7 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Tuple=2_0 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Dict=1 , UpperCamelCase_ : str=0 , UpperCamelCase_ : List[Any]=4 , ): lowerCAmelCase : List[str] = parent lowerCAmelCase : int = batch_size lowerCAmelCase : Union[str, Any] = seq_length lowerCAmelCase : List[Any] = is_training lowerCAmelCase : List[Any] = use_labels lowerCAmelCase : List[str] = vocab_size lowerCAmelCase : Union[str, Any] = hidden_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : int = num_attention_heads lowerCAmelCase : str = intermediate_size lowerCAmelCase : List[str] = hidden_dropout_prob lowerCAmelCase : Dict = attention_probs_dropout_prob lowerCAmelCase : List[str] = max_position_embeddings lowerCAmelCase : Optional[Any] = eos_token_id lowerCAmelCase : Optional[int] = pad_token_id lowerCAmelCase : List[Any] = bos_token_id lowerCAmelCase : List[str] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowerCAmelCase : Union[str, Any] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowerCAmelCase : List[Any] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : str = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) lowerCAmelCase : str = prepare_led_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = tf.concat( [tf.zeros_like(UpperCamelCase_ )[:, :-1], tf.ones_like(UpperCamelCase_ )[:, -1:]] , axis=-1 , ) lowerCAmelCase : Any = global_attention_mask return config, inputs_dict def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple ): lowerCAmelCase : Tuple = TFLEDModel(config=UpperCamelCase_ ).get_decoder() lowerCAmelCase : Optional[Any] = inputs_dict['''input_ids'''] lowerCAmelCase : List[str] = input_ids[:1, :] lowerCAmelCase : Optional[int] = inputs_dict['''attention_mask'''][:1, :] lowerCAmelCase : Union[str, Any] = 1 # first forward pass lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase : Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase : str = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0] lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase : Dict = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase : str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 ) def _snake_case ( _snake_case : Tuple , _snake_case : Tuple , _snake_case : Optional[Any] , _snake_case : str=None , _snake_case : Optional[Any]=None , _snake_case : List[str]=None , _snake_case : List[str]=None , ): if attention_mask is None: lowerCAmelCase : Any = tf.cast(tf.math.not_equal(_snake_case , 
config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase : Union[str, Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () __UpperCamelCase = (TFLEDForConditionalGeneration,) if is_tf_available() else () __UpperCamelCase = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) __UpperCamelCase = True __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : int ): lowerCAmelCase : Any = TFLEDModelTester(self ) lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) lowerCAmelCase : Dict = 2 lowerCAmelCase : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) lowerCAmelCase : int = True lowerCAmelCase : str = self.model_tester.seq_length lowerCAmelCase : Tuple = self.model_tester.encoder_seq_length def check_decoder_attentions_output(UpperCamelCase_ : Dict ): lowerCAmelCase : Dict = outputs.decoder_attentions self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(UpperCamelCase_ : List[str] ): lowerCAmelCase : Tuple = [t.numpy() for t in outputs.encoder_attentions] lowerCAmelCase : Dict = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(UpperCamelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: lowerCAmelCase : str = True lowerCAmelCase : Any = False lowerCAmelCase : List[Any] = False lowerCAmelCase : Union[str, Any] = 
model_class(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Any = len(UpperCamelCase_ ) self.assertEqual(config.output_hidden_states , UpperCamelCase_ ) check_encoder_attentions_output(UpperCamelCase_ ) if self.is_encoder_decoder: lowerCAmelCase : List[str] = model_class(UpperCamelCase_ ) lowerCAmelCase : List[Any] = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase_ ) check_decoder_attentions_output(UpperCamelCase_ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCAmelCase : Any = True lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) lowerCAmelCase : Tuple = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) self.assertEqual(config.output_hidden_states , UpperCamelCase_ ) check_encoder_attentions_output(UpperCamelCase_ ) # Check attention is always last and order is fine lowerCAmelCase : int = True lowerCAmelCase : int = True lowerCAmelCase : str = model_class(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCamelCase_ ) ) self.assertEqual(model.config.output_hidden_states , UpperCamelCase_ ) check_encoder_attentions_output(UpperCamelCase_ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def lowerCamelCase__ ( self : Optional[int] ): pass def lowerCamelCase__ ( self : Optional[int] ): # TODO: Head-masking not yet implement pass def _snake_case ( _snake_case : Union[str, Any] ): return tf.constant(_snake_case , dtype=tf.intaa ) snake_case__ : Tuple = 1e-4 @slow @require_tf class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : str ): lowerCAmelCase : int = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here lowerCAmelCase : List[Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase : Dict = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase : str = prepare_led_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = model(**UpperCamelCase_ )[0] lowerCAmelCase : List[str] = (1, 1_0_2_4, 7_6_8) self.assertEqual(output.shape , UpperCamelCase_ ) # change to expected output here lowerCAmelCase : Optional[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase_ , atol=1E-3 ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : str = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here lowerCAmelCase : Union[str, Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase : Tuple = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowerCAmelCase : Tuple = prepare_led_inputs_dict(model.config , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model(**UpperCamelCase_ )[0] lowerCAmelCase : List[Any] = (1, 1_0_2_4, model.config.vocab_size) self.assertEqual(output.shape , UpperCamelCase_ ) # change to expected output here lowerCAmelCase : Optional[Any] = 
tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , UpperCamelCase_ , atol=1E-3 , rtol=1E-3 )
637
"""simple docstring""" class snake_case_: def __init__( self : Union[str, Any] , UpperCamelCase_ : str ): lowerCAmelCase : Dict = val lowerCAmelCase : str = None lowerCAmelCase : Dict = None def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ): if self.val: if val < self.val: if self.left is None: lowerCAmelCase : int = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowerCAmelCase : Any = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = val def _snake_case ( _snake_case : Tuple , _snake_case : str ): # Recursive traversal if root: inorder(root.left , _snake_case ) res.append(root.val ) inorder(root.right , _snake_case ) def _snake_case ( _snake_case : Optional[Any] ): # Build BST if len(_snake_case ) == 0: return arr lowerCAmelCase : Optional[Any] = Node(arr[0] ) for i in range(1 , len(_snake_case ) ): root.insert(arr[i] ) # Traverse BST in order. lowerCAmelCase : Optional[int] = [] inorder(_snake_case , _snake_case ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
637
1
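A property check for tree_sort above, as a sketch; note that insert() treats an equal key as an overwrite, so duplicates are dropped and the right reference is sorted(set(...)).

data = [10, 1, 3, 2, 9, 14, 13, 3]
assert tree_sort(data) == sorted(set(data))
# Worst case: already-sorted input degenerates the BST into a linked list,
# giving O(n^2) time and O(n) recursion depth.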
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''sentencepiece.model'''} snake_case__ : Dict = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, } snake_case__ : Tuple = { '''google/rembert''': 256, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : str=False , UpperCamelCase_ : Tuple=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : Any="[SEP]" , UpperCamelCase_ : Union[str, Any]="[UNK]" , UpperCamelCase_ : Union[str, Any]="[SEP]" , UpperCamelCase_ : Optional[int]="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : Optional[int]="[MASK]" , **UpperCamelCase_ : Union[str, Any] , ): super().__init__( do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Dict = do_lower_case lowerCAmelCase : List[Any] = remove_space lowerCAmelCase : Any = keep_accents lowerCAmelCase : Optional[Any] = vocab_file lowerCAmelCase : Dict = spm.SentencePieceProcessor() self.sp_model.Load(UpperCamelCase_ ) @property def lowerCamelCase__ ( self : Tuple ): return len(self.sp_model ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Any = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : int ): lowerCAmelCase : Optional[Any] = self.__dict__.copy() lowerCAmelCase : Optional[Any] = None return state def __setstate__( self : List[str] , UpperCamelCase_ : List[Any] ): lowerCAmelCase : List[str] = d lowerCAmelCase : Optional[int] = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int=False ): lowerCAmelCase : List[str] = self.sp_model.EncodeAsPieces(UpperCamelCase_ ) return pieces def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Dict ): return self.sp_model.PieceToId(UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Dict ): return self.sp_model.IdToPiece(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : int = self.sp_model.decode_pieces(UpperCamelCase_ ) return out_string def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : List[Any] = [self.sep_token_id] lowerCAmelCase : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should 
not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Union[str, Any] = [self.sep_token_id] lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error('''Vocabulary path ({}) should be a directory'''.format(UpperCamelCase_ ) ) return lowerCAmelCase : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
637
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : int = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case_( a__ ): __UpperCamelCase = '''levit''' def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Tuple = image_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = kernel_size lowerCAmelCase : Dict = stride lowerCAmelCase : List[Any] = padding lowerCAmelCase : Dict = hidden_sizes lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Tuple = depths lowerCAmelCase : Dict = key_dim lowerCAmelCase : Union[str, Any] = drop_path_rate lowerCAmelCase : List[Any] = patch_size lowerCAmelCase : Tuple = attention_ratio lowerCAmelCase : Optional[int] = mlp_ratio lowerCAmelCase : Union[str, Any] = initializer_range lowerCAmelCase : List[str] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : Tuple ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase__ ( self : Optional[Any] ): return 1E-4
637
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## snake_case__ : str = 16 snake_case__ : Union[str, Any] = 32 def _snake_case ( _snake_case : Accelerator , _snake_case : int = 16 ): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) lowerCAmelCase : Union[str, Any] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_snake_case : List[Any] ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_snake_case , max_length=_snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCAmelCase : List[Any] = datasets.map( _snake_case , batched=_snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase : int = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_snake_case : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCAmelCase : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCAmelCase : List[str] = 16 elif accelerator.mixed_precision != "no": lowerCAmelCase : Any = 8 else: lowerCAmelCase : Optional[Any] = None return tokenizer.pad( _snake_case , padding='''longest''' , max_length=_snake_case , pad_to_multiple_of=_snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
lowerCAmelCase : Any = DataLoader( tokenized_datasets['''train'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case ) lowerCAmelCase : Union[str, Any] = DataLoader( tokenized_datasets['''validation'''] , shuffle=_snake_case , collate_fn=_snake_case , batch_size=_snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders snake_case__ : Any = mocked_dataloaders # noqa: F811 def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _snake_case ) == "1": lowerCAmelCase : Union[str, Any] = 2 # New Code # lowerCAmelCase : Optional[int] = int(args.gradient_accumulation_steps ) # Initialize accelerator lowerCAmelCase : List[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_snake_case ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase : Union[str, Any] = config['''lr'''] lowerCAmelCase : List[str] = int(config['''num_epochs'''] ) lowerCAmelCase : Optional[Any] = int(config['''seed'''] ) lowerCAmelCase : Optional[Any] = int(config['''batch_size'''] ) lowerCAmelCase : int = evaluate.load('''glue''' , '''mrpc''' ) set_seed(_snake_case ) lowerCAmelCase, lowerCAmelCase : Any = get_dataloaders(_snake_case , _snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCAmelCase : Any = model.to(accelerator.device ) # Instantiate optimizer lowerCAmelCase : Tuple = AdamW(params=model.parameters() , lr=_snake_case ) # Instantiate scheduler lowerCAmelCase : Optional[Any] = get_linear_schedule_with_warmup( optimizer=_snake_case , num_warmup_steps=100 , num_training_steps=(len(_snake_case ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = accelerator.prepare( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # Now we train the model for epoch in range(_snake_case ): model.train() for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(_snake_case ): lowerCAmelCase : Optional[int] = model(**_snake_case ) lowerCAmelCase : str = output.loss accelerator.backward(_snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCAmelCase : Tuple = model(**_snake_case ) lowerCAmelCase : str = outputs.logits.argmax(dim=-1 ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_snake_case , references=_snake_case , ) lowerCAmelCase : Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _snake_case ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_snake_case , default=_snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=_snake_case , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) lowerCAmelCase : Any = parser.parse_args() lowerCAmelCase : Optional[Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_snake_case , _snake_case ) if __name__ == "__main__": main()
637
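The core pattern from the training loop above, reduced to a sketch; the model, optimizer, and dataloader are assumed to exist, and `accumulation_steps=2` is an illustrative value.

from accelerate import Accelerator

def training_loop(model, optimizer, train_dataloader, accumulation_steps=2):
    accelerator = Accelerator(gradient_accumulation_steps=accumulation_steps)
    model, optimizer, train_dataloader = accelerator.prepare(model, optimizer, train_dataloader)
    for batch in train_dataloader:
        # Inside this context, accelerate defers the optimizer step and
        # gradient sync until `accumulation_steps` backward passes have run.
        with accelerator.accumulate(model):
            loss = model(**batch).loss
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()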
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): lowerCAmelCase : str = 3 lowerCAmelCase : Tuple = 2_5_0 lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) lowerCAmelCase : Union[str, Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Dict = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : str ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
637
1
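How the criteria exercised above are used outside of tests, as a sketch; the model, input_ids, and both limit values are assumptions, but `stopping_criteria` is a real keyword of `generate()`.

from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

def generate_with_limits(model, input_ids):
    # Stop once sequences reach 50 tokens or after two seconds, whichever comes first.
    stopping_criteria = StoppingCriteriaList(
        [MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=2.0)]
    )
    return model.generate(input_ids, stopping_criteria=stopping_criteria)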
"""simple docstring""" from ...processing_utils import ProcessorMixin class snake_case_( a__ ): __UpperCamelCase = ['''image_processor''', '''feature_extractor'''] __UpperCamelCase = '''TvltImageProcessor''' __UpperCamelCase = '''TvltFeatureExtractor''' def __init__( self : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str ): super().__init__(image_processor=UpperCamelCase_ , feature_extractor=UpperCamelCase_ ) lowerCAmelCase : int = image_processor lowerCAmelCase : List[str] = feature_extractor def __call__( self : Optional[int] , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Dict=False , UpperCamelCase_ : List[Any]=False , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Dict , ): if images is None and audio is None: raise ValueError('''You need to specify either an `images` or `audio` input to process.''' ) lowerCAmelCase : str = None if images is not None: lowerCAmelCase : Tuple = self.image_processor(UpperCamelCase_ , mask_pixel=UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) if images_mixed is not None: lowerCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase_ , is_mixed=UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) if audio is not None: lowerCAmelCase : Union[str, Any] = self.feature_extractor( UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , mask_audio=UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : str = {} if audio is not None: output_dict.update(UpperCamelCase_ ) if images is not None: output_dict.update(UpperCamelCase_ ) if images_mixed_dict is not None: output_dict.update(UpperCamelCase_ ) return output_dict @property def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Dict = self.image_processor.model_input_names lowerCAmelCase : str = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
637
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = None def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case : List[Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowerCAmelCase : List[Any] = [] for i in range(_snake_case ): lowerCAmelCase : int = i / num_diffusion_timesteps lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) ) return torch.tensor(_snake_case , dtype=torch.floataa ) class snake_case_( a__ , a__ ): @register_to_config def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ ) lowerCAmelCase : str = 1.0 - self.betas lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) lowerCAmelCase : Tuple = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowerCAmelCase : Any = 1.0 # setable values lowerCAmelCase : Any = None lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() ) lowerCAmelCase : List[str] = variance_type def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ): return sample def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ): lowerCAmelCase : Any = num_inference_steps lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ): if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : int = self.alphas_cumprod[t] lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : Dict = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : Tuple = self.betas[t] else: lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add 
variance to pred_sample lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowerCAmelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) ) lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowerCAmelCase : Optional[Any] = variance.log() lowerCAmelCase : Union[str, Any] = beta.log() lowerCAmelCase : Dict = (predicted_variance + 1) / 2 lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 ) else: lowerCAmelCase : Optional[int] = None # 1. compute alphas, betas if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t] lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : int = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : List[Any] = self.betas[t] lowerCAmelCase : Optional[int] = self.alphas[t] else: lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev lowerCAmelCase : Dict = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowerCAmelCase : Tuple = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowerCAmelCase : Dict = torch.clamp( UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCAmelCase : int = 0 if t > 0: lowerCAmelCase : Union[str, Any] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device ) lowerCAmelCase : Any = self._get_variance( UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , ) if self.variance_type == "fixed_small_log": lowerCAmelCase : str = variance elif self.variance_type == "learned_range": lowerCAmelCase : Optional[Any] = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) lowerCAmelCase : List[Any] = variance * variance_noise lowerCAmelCase : int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowerCAmelCase : int = timesteps.to(original_samples.device ) lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase : str = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
637
1
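The shape of the denoising loop the scheduler above is built for, as a sketch; it assumes the class is diffusers' UnCLIPScheduler (as its output type suggests), `unet` is a hypothetical epsilon predictor, and the sample shape and step count are illustrative.

import torch

def denoise(scheduler, unet, num_inference_steps=25):
    scheduler.set_timesteps(num_inference_steps, device="cpu")
    sample = torch.randn(1, 3, 64, 64)  # start from the initial noise distribution
    for t in scheduler.timesteps:
        model_output = unet(sample, t)                               # predicted noise
        sample = scheduler.step(model_output, t, sample).prev_sample  # one reverse step
    return sample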
"""simple docstring""" def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Any ): lowerCAmelCase : Tuple = [1] for i in range(2 , _snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" lowerCAmelCase : str = [] lowerCAmelCase : Any = list(range(_snake_case ) ) # Find permutation while factorials: lowerCAmelCase : List[Any] = factorials.pop() lowerCAmelCase, lowerCAmelCase : str = divmod(_snake_case , _snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
637
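A worked example of the factorial-number-system decomposition above; the function name follows the restored definition, and the arithmetic is checked by hand.

# n = 4 -> factorials collected are [1, 2, 6] (1!, 2!, 3!).
# k = 5: divmod(5, 6) = (0, 5) -> pick elements[0] = 0
#        divmod(5, 2) = (2, 1) -> pick elements[2] = 3 (from [1, 2, 3])
#        divmod(1, 1) = (1, 0) -> pick elements[1] = 2 (from [1, 2])
#        the remaining element 1 is appended last.
assert kth_permutation(5, 4) == [0, 3, 2, 1]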
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class snake_case_: def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ): lowerCAmelCase : Any = parent lowerCAmelCase : Any = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : str = is_training lowerCAmelCase : List[Any] = use_input_mask lowerCAmelCase : Optional[int] = use_token_type_ids lowerCAmelCase : Union[str, Any] = use_labels lowerCAmelCase : List[str] = vocab_size lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : int = num_hidden_layers lowerCAmelCase : Union[str, Any] = num_attention_heads lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : List[Any] = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : Tuple = attention_probs_dropout_prob lowerCAmelCase : Optional[Any] = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Tuple = type_sequence_label_size lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : str = num_labels lowerCAmelCase : Optional[int] = num_choices lowerCAmelCase : Tuple = scope def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : Tuple = None if self.use_input_mask: lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : List[str] = None if self.use_token_type_ids: lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : int = None lowerCAmelCase : int = None lowerCAmelCase : Tuple = None if self.use_labels: lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Tuple ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = True lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , ) lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ): lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : str = True lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # first forward pass lowerCAmelCase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 
) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] lowerCAmelCase : str = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] # select random slice lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Tuple = config_and_inputs lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , a__ , unittest.TestCase ): __UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = LlamaModelTester(self ) lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase : str = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : List[str] = 3 lowerCAmelCase : List[str] = input_dict['''input_ids'''] lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : 
int = '''single_label_classification''' lowerCAmelCase : Tuple = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : Dict = '''multi_label_classification''' lowerCAmelCase : Union[str, Any] = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def lowerCamelCase__ ( self : Optional[Any] ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size ) lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ ) original_model.to(UpperCamelCase_ ) original_model.eval() lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0} lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ ) scaled_model.to(UpperCamelCase_ ) scaled_model.eval() lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) @require_torch class snake_case_( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771,
-2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # fmt: off lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Model is currently gated''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' lowerCAmelCase : int = '''Simply put, the theory of relativity states that ''' lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ ) # greedy generation outputs lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
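The cache-consistency check in the model tester above reduces to one invariant: a single forward pass over the full sequence and an incremental pass that reuses past_key_values must agree on the hidden states of the new positions. A minimal sketch of that pattern, assuming transformers and torch are installed; the tiny config sizes below are illustrative assumptions, not the values the suite uses:

import torch
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=2,
                     num_attention_heads=4, intermediate_size=64)
model = LlamaModel(config).eval()   # randomly initialized, deliberately tiny

input_ids = torch.randint(0, 100, (1, 8))
next_ids = torch.randint(0, 100, (1, 3))

with torch.no_grad():
    # one pass over the full, concatenated sequence
    full = model(torch.cat([input_ids, next_ids], dim=-1)).last_hidden_state
    # two passes: cache the prefix, then feed only the new tokens
    past = model(input_ids, use_cache=True).past_key_values
    incremental = model(next_ids, past_key_values=past).last_hidden_state

# the three new positions must match between the two strategies
assert torch.allclose(full[:, -3:], incremental, atol=1e-3)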
637
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: snake_case__ : Optional[Any] = None snake_case__ : Union[str, Any] = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Any = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } snake_case__ : int = { '''google/bigbird-roberta-base''': 4_096, '''google/bigbird-roberta-large''': 4_096, '''google/bigbird-base-trivia-itc''': 4_096, } snake_case__ : Optional[Any] = '''▁''' class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BigBirdTokenizer __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = [] def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = vocab_file lowerCAmelCase : Optional[int] = False if not self.vocab_file else True def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : str = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Tuple = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
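The three mask/segment helpers in this tokenizer all describe one layout: [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair. A pure-Python sketch of how the three outputs line up (the ids are made up for illustration; the real ones come from the SentencePiece vocab):

cls_id, sep_id = 65, 66                      # hypothetical special-token ids
a, b = [10, 11, 12], [20, 21]                # two already-tokenized sequences

input_ids = [cls_id] + a + [sep_id] + b + [sep_id]
special_tokens_mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)

# all three views cover the same 8 positions
assert len(input_ids) == len(special_tokens_mask) == len(token_type_ids) == 8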
637
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ): lowerCAmelCase : Dict = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ): lowerCAmelCase : Optional[int] = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _snake_case ) lowerCAmelCase : List[Any] = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Any = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , ) for _ in range(1_0_0_0 ): lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class snake_case_( unittest.TestCase ): __UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None __UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __UpperCamelCase = 10 def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0} # schedulers dict format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase : Optional[Any] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' ) class snake_case_: def __init__( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = fn def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ): return self.fn(*UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
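The expected list for get_linear_schedule_with_warmup can be reproduced by hand: the learning rate climbs linearly from 0 to the base value over num_warmup_steps, then decays linearly toward 0 at num_training_steps. A short sketch of that arithmetic with the same settings as above (base LR 10.0, 2 warmup steps, 10 training steps):

def linear_lambda(step, warmup=2, total=10):
    # warmup phase: fraction of warmup completed; decay phase: fraction of post-warmup span left
    if step < warmup:
        return step / warmup
    return max(0.0, (total - step) / (total - warmup))

lrs = [10.0 * linear_lambda(step) for step in range(10)]
assert lrs == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]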
637
1
"""simple docstring""" import requests from bsa import BeautifulSoup def _snake_case ( _snake_case : str = "https://www.worldometers.info/coronavirus" ): lowerCAmelCase : str = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' ) lowerCAmelCase : Any = soup.findAll('''h1''' ) lowerCAmelCase : Optional[Any] = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} ) keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} ) values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} ) return {key.text.strip(): value.text.strip() for key, value in zip(_snake_case , _snake_case )} if __name__ == "__main__": print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''') for key, value in world_covidaa_stats().items(): print(f"""{key}\n{value}\n""")
637
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_( a__ ): __UpperCamelCase = '''philschmid/bart-large-cnn-samsum''' __UpperCamelCase = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) __UpperCamelCase = '''summarizer''' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = ['''text'''] __UpperCamelCase = ['''text'''] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ): return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ): return self.model.generate(**UpperCamelCase_ )[0] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ): return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
637
1
"""simple docstring""" import random def _snake_case ( _snake_case : int , _snake_case : float , _snake_case : bool = False ): lowerCAmelCase : dict = {i: [] for i in range(_snake_case )} # if probability is greater or equal than 1, then generate a complete graph if probability >= 1: return complete_graph(_snake_case ) # if probability is lower or equal than 0, then return a graph without edges if probability <= 0: return graph # for each couple of nodes, add an edge from u to v # if the number randomly generated is greater than probability probability for i in range(_snake_case ): for j in range(i + 1 , _snake_case ): if random.random() < probability: graph[i].append(_snake_case ) if not directed: # if the graph is undirected, add an edge in from j to i, either graph[j].append(_snake_case ) return graph def _snake_case ( _snake_case : int ): return { i: [j for j in range(_snake_case ) if i != j] for i in range(_snake_case ) } if __name__ == "__main__": import doctest doctest.testmod()
637
"""simple docstring""" snake_case__ : List[Any] = '''Tobias Carryer''' from time import time class snake_case_: def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008 lowerCAmelCase : str = multiplier lowerCAmelCase : Optional[int] = increment lowerCAmelCase : Optional[Any] = modulo lowerCAmelCase : Optional[Any] = seed def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31) while True: print(lcg.next_number())
637
1
"""simple docstring""" from bisect import bisect from itertools import accumulate def _snake_case ( _snake_case : str , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Optional[Any] ): lowerCAmelCase : Optional[int] = sorted(zip(_snake_case , _snake_case ) , key=lambda _snake_case : x[0] / x[1] , reverse=_snake_case ) lowerCAmelCase, lowerCAmelCase : Tuple = [i[0] for i in r], [i[1] for i in r] lowerCAmelCase : Optional[Any] = list(accumulate(_snake_case ) ) lowerCAmelCase : str = bisect(_snake_case , _snake_case ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
637
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: snake_case__ : Optional[Any] = None snake_case__ : Union[str, Any] = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Any = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } snake_case__ : int = { '''google/bigbird-roberta-base''': 4_096, '''google/bigbird-roberta-large''': 4_096, '''google/bigbird-base-trivia-itc''': 4_096, } snake_case__ : Optional[Any] = '''▁''' class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BigBirdTokenizer __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = [] def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = vocab_file lowerCAmelCase : Optional[int] = False if not self.vocab_file else True def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : str = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Tuple = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
637
1
"""simple docstring""" import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) snake_case__ : Optional[int] = logging.getLogger(__name__) def _snake_case ( _snake_case : str ): lowerCAmelCase : Dict = git.Repo(search_parent_directories=_snake_case ) lowerCAmelCase : int = { '''repo_id''': str(_snake_case ), '''repo_sha''': str(repo.head.object.hexsha ), '''repo_branch''': str(repo.active_branch ), } with open(os.path.join(_snake_case , '''git_log.json''' ) , '''w''' ) as f: json.dump(_snake_case , _snake_case , indent=4 ) def _snake_case ( _snake_case : List[str] ): if params.n_gpu <= 0: lowerCAmelCase : int = 0 lowerCAmelCase : Optional[int] = -1 lowerCAmelCase : List[str] = True lowerCAmelCase : List[str] = False return assert torch.cuda.is_available() logger.info('''Initializing GPUs''' ) if params.n_gpu > 1: assert params.local_rank != -1 lowerCAmelCase : int = int(os.environ['''WORLD_SIZE'''] ) lowerCAmelCase : str = int(os.environ['''N_GPU_NODE'''] ) lowerCAmelCase : Any = int(os.environ['''RANK'''] ) # number of nodes / node ID lowerCAmelCase : str = params.world_size // params.n_gpu_per_node lowerCAmelCase : Optional[int] = params.global_rank // params.n_gpu_per_node lowerCAmelCase : Any = True assert params.n_nodes == int(os.environ['''N_NODES'''] ) assert params.node_id == int(os.environ['''NODE_RANK'''] ) # local job (single GPU) else: assert params.local_rank == -1 lowerCAmelCase : int = 1 lowerCAmelCase : int = 0 lowerCAmelCase : Optional[Any] = 0 lowerCAmelCase : Union[str, Any] = 0 lowerCAmelCase : Any = 1 lowerCAmelCase : Any = 1 lowerCAmelCase : Any = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode lowerCAmelCase : int = params.node_id == 0 and params.local_rank == 0 lowerCAmelCase : str = params.n_nodes > 1 # summary lowerCAmelCase : List[str] = f'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes ) logger.info(PREFIX + '''Node ID : %i''' % params.node_id ) logger.info(PREFIX + '''Local rank : %i''' % params.local_rank ) logger.info(PREFIX + '''World size : %i''' % params.world_size ) logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node ) logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) ) logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) ) logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) ) logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('''Initializing PyTorch distributed''' ) torch.distributed.init_process_group( init_method='''env://''' , backend='''nccl''' , ) def _snake_case ( _snake_case : Dict ): np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
637
"""simple docstring""" # using dfs for finding eulerian path traversal def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ): lowerCAmelCase : Any = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) return path def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ): lowerCAmelCase : Tuple = 0 lowerCAmelCase : Optional[Any] = -1 for i in range(_snake_case ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 lowerCAmelCase : Optional[Any] = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ): lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case ) if check == 3: print('''graph is not Eulerian''' ) print('''no path''' ) return lowerCAmelCase : Dict = 1 if check == 2: lowerCAmelCase : int = odd_node print('''graph has a Euler path''' ) if check == 1: print('''graph has a Euler cycle''' ) lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case ) print(_snake_case ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]} lowerCAmelCase : Any = { 1: [], 2: [] # all degree is zero } lowerCAmelCase : List[str] = 10 check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) if __name__ == "__main__": main()
637
1
"""simple docstring""" import math import unittest def _snake_case ( _snake_case : int ): assert isinstance(_snake_case , _snake_case ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Union[str, Any] ): self.assertTrue(is_prime(2 ) ) self.assertTrue(is_prime(3 ) ) self.assertTrue(is_prime(5 ) ) self.assertTrue(is_prime(7 ) ) self.assertTrue(is_prime(1_1 ) ) self.assertTrue(is_prime(1_3 ) ) self.assertTrue(is_prime(1_7 ) ) self.assertTrue(is_prime(1_9 ) ) self.assertTrue(is_prime(2_3 ) ) self.assertTrue(is_prime(2_9 ) ) def lowerCamelCase__ ( self : str ): with self.assertRaises(UpperCamelCase_ ): is_prime(-1_9 ) self.assertFalse( is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , ) self.assertFalse( is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , ) self.assertFalse(is_prime(2 * 2 ) ) self.assertFalse(is_prime(2 * 3 ) ) self.assertFalse(is_prime(3 * 3 ) ) self.assertFalse(is_prime(3 * 5 ) ) self.assertFalse(is_prime(3 * 5 * 7 ) ) if __name__ == "__main__": unittest.main()
637
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[Any] = 0 @slow def lowerCamelCase__ ( self : Dict ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) # Check that tokenizer_type ≠ model_type lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Any ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , 
'''merges.txt''' ) ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) ) lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): with pytest.raises(UpperCamelCase_ ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowerCamelCase__ ( self : str ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ ) else: self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowerCamelCase__ ( self : Tuple ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values() lowerCAmelCase : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Any ): self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = '''Hello, world. 
How are you?''' lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowerCamelCase__ ( self : int ): lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): # Check we can load the tokenizer config of an online model. lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' ) lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ ) self.assertDictEqual(UpperCamelCase_ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowerCamelCase__ ( self : Optional[int] ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowerCamelCase__ ( self : str ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) # Can register in two steps AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ ) bert_tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): class snake_case_( a__ ): __UpperCamelCase = False class snake_case_( a__ ): __UpperCamelCase = NewTokenizer __UpperCamelCase = False try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # If remote code is not set, the default is to use local lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowerCamelCase__ ( self : str ): with self.assertRaisesRegex( UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' ) def lowerCamelCase__ ( self : int ): with self.assertRaisesRegex( UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' ) def lowerCamelCase__ ( self : Optional[int] ): # Make sure we have cached the tokenizer. lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
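An illustrative sketch (not part of the test class above) of the opt-in these tests exercise; `trust_remote_code=True` downloads and executes code from the Hub repository, so it should only be used with repositories you trust:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
)
print(type(tokenizer).__name__)  # NewTokenizerFast when `tokenizers` is installed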
637
1
"""simple docstring""" # using dfs for finding eulerian path traversal def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ): lowerCAmelCase : Any = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) return path def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ): lowerCAmelCase : Tuple = 0 lowerCAmelCase : Optional[Any] = -1 for i in range(_snake_case ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 lowerCAmelCase : Optional[Any] = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ): lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case ) if check == 3: print('''graph is not Eulerian''' ) print('''no path''' ) return lowerCAmelCase : Dict = 1 if check == 2: lowerCAmelCase : int = odd_node print('''graph has a Euler path''' ) if check == 1: print('''graph has a Euler cycle''' ) lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case ) print(_snake_case ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]} lowerCAmelCase : Any = { 1: [], 2: [] # all degree is zero } lowerCAmelCase : List[str] = 10 check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) if __name__ == "__main__": main()
637
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } snake_case__ : List[Any] = { '''allenai/led-base-16384''': 16_384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _snake_case ( ): lowerCAmelCase : Optional[int] = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowerCAmelCase : str = bs[:] lowerCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(_snake_case ) cs.append(2**8 + n ) n += 1 lowerCAmelCase : int = [chr(_snake_case ) for n in cs] return dict(zip(_snake_case , _snake_case ) ) def _snake_case ( _snake_case : List[Any] ): lowerCAmelCase : List[str] = set() lowerCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase : Optional[Any] = char return pairs class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ): lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a 
normal word, i.e. include the space before it lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: lowerCAmelCase : Any = json.load(UpperCamelCase_ ) lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()} lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding lowerCAmelCase : List[Any] = bytes_to_unicode() lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1] lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCAmelCase : List[Any] = {} lowerCAmelCase : Optional[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase__ ( self : Union[str, Any] ): return len(self.encoder ) def lowerCamelCase__ ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ): if token in self.cache: return self.cache[token] lowerCAmelCase : List[str] = tuple(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase, lowerCAmelCase : Any = bigram lowerCAmelCase : Tuple = [] lowerCAmelCase : Any = 0 while i < len(UpperCamelCase_ ): try: lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase : int = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase : Tuple = tuple(UpperCamelCase_ ) lowerCAmelCase : Tuple = new_word if len(UpperCamelCase_ ) == 1: break else: lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ ) lowerCAmelCase : List[str] = word return word def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Dict = [] for token in re.findall(self.pat , UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) ) return bpe_tokens def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ): return 
self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ): return self.decoder.get(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : int = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) lowerCAmelCase : Optional[int] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCAmelCase : Tuple = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] lowerCAmelCase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ): lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): lowerCAmelCase : List[Any] = ''' ''' + text return (text, kwargs) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ 
: Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Dict = super()._pad( encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ) # Load from model defaults if return_attention_mask is None: lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ ) if needs_to_be_padded: lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase : Dict = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase : int = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
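A hedged usage sketch of the `global_attention_mask` padding implemented in `_pad` above, assuming the public allenai/led-base-16384 checkpoint listed in the pretrained map; the routing through `tokenizer.pad` is how the public API reaches `_pad`:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
encoded = tokenizer(["short text", "a noticeably longer piece of text"])
# Give the first token of every sequence global attention (1 = global, 0 = local):
encoded["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in encoded["input_ids"]]
batch = tokenizer.pad(encoded, padding=True)
print(batch["global_attention_mask"])  # shorter rows are padded with -1, as in `_pad` above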
637
1
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def _snake_case ( _snake_case : Features ): lowerCAmelCase : int = np.inf def set_batch_size(_snake_case : FeatureType ) -> None: nonlocal batch_size if isinstance(_snake_case , _snake_case ): lowerCAmelCase : int = min(_snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_snake_case , _snake_case ): lowerCAmelCase : str = min(_snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_snake_case , _snake_case ) and feature.dtype == "binary": lowerCAmelCase : Dict = min(_snake_case , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_snake_case , _snake_case ) return None if batch_size is np.inf else batch_size class snake_case_( a__ ): def __init__( self : int , UpperCamelCase_ : NestedDataStructureLike[PathLike] , UpperCamelCase_ : Optional[NamedSplit] = None , UpperCamelCase_ : Optional[Features] = None , UpperCamelCase_ : str = None , UpperCamelCase_ : bool = False , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : List[str] , ): super().__init__( UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths} lowerCAmelCase : Optional[Any] = _PACKAGED_DATASETS_MODULES['''parquet'''][1] lowerCAmelCase : Optional[Any] = Parquet( cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , hash=UpperCamelCase_ , **UpperCamelCase_ , ) def lowerCamelCase__ ( self : Optional[Any] ): # Build iterable dataset if self.streaming: lowerCAmelCase : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowerCAmelCase : Optional[Any] = None lowerCAmelCase : List[Any] = None lowerCAmelCase : Union[str, Any] = None lowerCAmelCase : str = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) lowerCAmelCase : Dict = self.builder.as_dataset( split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset class snake_case_: def __init__( self : Optional[int] , UpperCamelCase_ : Dataset , UpperCamelCase_ : Union[PathLike, BinaryIO] , UpperCamelCase_ : Optional[int] = None , **UpperCamelCase_ : Union[str, Any] , ): lowerCAmelCase : Optional[int] = dataset lowerCAmelCase : str = path_or_buf lowerCAmelCase : Optional[int] = batch_size or get_writer_batch_size(dataset.features ) lowerCAmelCase : Optional[Any] = parquet_writer_kwargs def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[Any] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 
'''wb+''' ) as buffer: lowerCAmelCase : Any = self._write(file_obj=UpperCamelCase_ , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs ) else: lowerCAmelCase : List[str] = self._write(file_obj=self.path_or_buf , batch_size=UpperCamelCase_ , **self.parquet_writer_kwargs ) return written def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : BinaryIO , UpperCamelCase_ : int , **UpperCamelCase_ : Dict ): lowerCAmelCase : str = 0 lowerCAmelCase : int = parquet_writer_kwargs.pop('''path_or_buf''' , UpperCamelCase_ ) lowerCAmelCase : List[str] = self.dataset.features.arrow_schema lowerCAmelCase : Tuple = pq.ParquetWriter(UpperCamelCase_ , schema=UpperCamelCase_ , **UpperCamelCase_ ) for offset in logging.tqdm( range(0 , len(self.dataset ) , UpperCamelCase_ ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): lowerCAmelCase : Union[str, Any] = query_table( table=self.dataset._data , key=slice(UpperCamelCase_ , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(UpperCamelCase_ ) written += batch.nbytes writer.close() return written
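A hedged sketch of the reader/writer pair through the public `datasets` API, which wraps the classes above (file name and toy data are made up for illustration):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
ds.to_parquet("tiny.parquet")                    # batched writer path above
reloaded = Dataset.from_parquet("tiny.parquet")  # reader path above
print(reloaded.features)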
637
"""simple docstring""" def _snake_case ( _snake_case : int = 4000000 ): lowerCAmelCase : int = [0, 1] lowerCAmelCase : List[str] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 lowerCAmelCase : int = 0 for j in range(len(_snake_case ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f"""{solution() = }""")
637
1
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class snake_case_: def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] ): raise NotImplementedError() def lowerCamelCase__ ( self : List[Any] ): raise NotImplementedError() class snake_case_( a__ ): def __init__( self : Optional[int] , UpperCamelCase_ : "AutoTokenizer" , UpperCamelCase_ : bool = False , **UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = tokenizer lowerCAmelCase : str = skip_prompt lowerCAmelCase : Dict = decode_kwargs # variables used in the streaming process lowerCAmelCase : List[Any] = [] lowerCAmelCase : Optional[int] = 0 lowerCAmelCase : Union[str, Any] = True def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] ): if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: lowerCAmelCase : Optional[Any] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: lowerCAmelCase : Optional[int] = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) lowerCAmelCase : List[Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): lowerCAmelCase : Tuple = text[self.print_len :] lowerCAmelCase : List[Any] = [] lowerCAmelCase : str = 0 # If the last token is a CJK character, we print the characters. elif len(UpperCamelCase_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ): lowerCAmelCase : Tuple = text[self.print_len :] self.print_len += len(UpperCamelCase_ ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: lowerCAmelCase : Dict = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(UpperCamelCase_ ) self.on_finalized_text(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): # Flush the cache, if it exists if len(self.token_cache ) > 0: lowerCAmelCase : Dict = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) lowerCAmelCase : Optional[int] = text[self.print_len :] lowerCAmelCase : Dict = [] lowerCAmelCase : Tuple = 0 else: lowerCAmelCase : List[str] = '''''' lowerCAmelCase : Tuple = True self.on_finalized_text(UpperCamelCase_ , stream_end=UpperCamelCase_ ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : str , UpperCamelCase_ : bool = False ): print(UpperCamelCase_ , flush=UpperCamelCase_ , end='''''' if not stream_end else None ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[Any] ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. 
if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class snake_case_( a__ ): def __init__( self : Any , UpperCamelCase_ : "AutoTokenizer" , UpperCamelCase_ : bool = False , UpperCamelCase_ : Optional[float] = None , **UpperCamelCase_ : List[str] ): super().__init__(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = Queue() lowerCAmelCase : Dict = None lowerCAmelCase : Dict = timeout def lowerCamelCase__ ( self : Any , UpperCamelCase_ : str , UpperCamelCase_ : bool = False ): self.text_queue.put(UpperCamelCase_ , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : Union[str, Any] ): return self def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[int] = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
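A usage sketch for the iterator streamer above (these classes ship in transformers as TextStreamer/TextIteratorStreamer; gpt2 is used only as a small public checkpoint):

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
streamer = TextIteratorStreamer(tok, skip_prompt=True)

inputs = tok("An increasing sequence: one,", return_tensors="pt")
# `generate` pushes tokens into the streamer; consume them on the main thread.
Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20}).start()
for chunk in streamer:
    print(chunk, end="")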
637
"""simple docstring""" def _snake_case ( _snake_case : float , _snake_case : list[float] ): if discount_rate < 0: raise ValueError('''Discount rate cannot be negative''' ) if not cash_flows: raise ValueError('''Cash flows list cannot be empty''' ) lowerCAmelCase : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) ) return round(_snake_case , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
637
1
"""simple docstring""" from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class snake_case_( a__ ): __UpperCamelCase = '''new-model''' if is_tf_available(): class snake_case_( a__ ): __UpperCamelCase = NewModelConfig @require_tf class snake_case_( unittest.TestCase ): @slow def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = '''bert-base-cased''' lowerCAmelCase : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Optional[int] = '''bert-base-cased''' lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = TFAutoModelForPreTraining.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Any = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(UpperCamelCase_ , output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : Tuple = 
AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(UpperCamelCase_ , output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Optional[Any] ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase : str = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase_ , output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: lowerCAmelCase : Tuple = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = TFAutoModelForSequenceClassification.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : Dict ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TFAutoModelForQuestionAnswering.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @slow @require_tensorflow_probability def lowerCamelCase__ ( self : int ): for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Any = TFAutoModelForTableQuestionAnswering.from_pretrained(UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = TFAutoModelForTableQuestionAnswering.from_pretrained( UpperCamelCase_ , output_loading_info=UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : str = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 
1_4_4_1_0 ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=UpperCamelCase_ ) , 1_4_4_1_0 ) def lowerCamelCase__ ( self : str ): # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel lowerCAmelCase : Dict = TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = copy.deepcopy(model.config ) lowerCAmelCase : List[str] = ['''FunnelBaseModel'''] lowerCAmelCase : List[Any] = TFAutoModel.from_config(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = TFAutoModel.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): try: AutoConfig.register('''new-model''' , UpperCamelCase_ ) lowerCAmelCase : Dict = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(UpperCamelCase_ ): auto_class.register(UpperCamelCase_ , UpperCamelCase_ ) auto_class.register(UpperCamelCase_ , UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): auto_class.register(UpperCamelCase_ , UpperCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase : List[Any] = BertModelTester(self ).get_config() lowerCAmelCase : Optional[Any] = NewModelConfig(**tiny_config.to_dict() ) lowerCAmelCase : str = auto_class.from_config(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = auto_class.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def lowerCamelCase__ ( self : List[str] ): with self.assertRaisesRegex( UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase : Optional[Any] = TFAutoModel.from_pretrained('''bert-base''' ) def lowerCamelCase__ ( self : List[str] ): with self.assertRaisesRegex( UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' ) def lowerCamelCase__ ( self : str ): with self.assertRaisesRegex( UpperCamelCase_ , '''hf-internal-testing/config-no-model does not appear to have a file 
named pytorch_model.bin''' , ): lowerCAmelCase : List[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def lowerCamelCase__ ( self : List[str] ): with self.assertRaisesRegex(UpperCamelCase_ , '''Use `from_pt=True` to load this model''' ): lowerCAmelCase : Union[str, Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def lowerCamelCase__ ( self : str ): # Make sure we have cached the model. lowerCAmelCase : Optional[Any] = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowerCAmelCase : str = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint lowerCAmelCase : int = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: lowerCAmelCase : Optional[int] = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
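A hedged sketch of the from_config / from_pretrained pattern these tests exercise, using the same bert-base-cased checkpoint as the tests (from_config builds a randomly initialized model):

from transformers import AutoConfig, TFAutoModel

config = AutoConfig.from_pretrained("bert-base-cased")
model = TFAutoModel.from_config(config)          # architecture only, random weights
model = TFAutoModel.from_pretrained("bert-base-cased")  # pretrained weights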
637
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : list[int] , _snake_case : int ): if len(_snake_case ) == 0: return False lowerCAmelCase : List[Any] = len(_snake_case ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , _snake_case ) else: return binary_search(a_list[midpoint + 1 :] , _snake_case ) if __name__ == "__main__": snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip() snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')] snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip()) snake_case__ : str = '''''' if binary_search(sequence, target) else '''not ''' print(f"""{target} was {not_str}found in {sequence}""")
637
1
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset snake_case__ : List[str] = '''bert-base-cased''' snake_case__ : Optional[Any] = '''google/pegasus-xsum''' snake_case__ : Optional[int] = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] snake_case__ : List[str] = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] snake_case__ : int = '''patrickvonplaten/t5-tiny-random''' snake_case__ : str = '''sshleifer/bart-tiny-random''' snake_case__ : Optional[Any] = '''sshleifer/tiny-mbart''' snake_case__ : str = '''sshleifer/tiny-marian-en-de''' def _snake_case ( _snake_case : Path , _snake_case : list ): lowerCAmelCase : Any = '''\n'''.join(_snake_case ) Path(_snake_case ).open('''w''' ).writelines(_snake_case ) def _snake_case ( _snake_case : str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(_snake_case , f'''{split}.source''' ) , _snake_case ) _dump_articles(os.path.join(_snake_case , f'''{split}.target''' ) , _snake_case ) return tmp_dir class snake_case_( a__ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase : Optional[int] = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in ARTICLES ) lowerCAmelCase : List[str] = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in SUMMARIES ) lowerCAmelCase : str = 4 lowerCAmelCase : int = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated lowerCAmelCase, lowerCAmelCase : Optional[int] = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error. lowerCAmelCase : Tuple = SeqaSeqDataset( UpperCamelCase_ , data_dir=UpperCamelCase_ , type_path='''train''' , max_source_length=UpperCamelCase_ , max_target_length=UpperCamelCase_ , src_lang=UpperCamelCase_ , tgt_lang=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place lowerCAmelCase : Optional[int] = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) lowerCAmelCase : Optional[Any] = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in ARTICLES ) lowerCAmelCase : Optional[int] = max(len(tokenizer.encode(UpperCamelCase_ ) ) for a in SUMMARIES ) lowerCAmelCase : List[Any] = 4 lowerCAmelCase : Optional[Any] = LegacySeqaSeqDataset( UpperCamelCase_ , data_dir=UpperCamelCase_ , type_path='''train''' , max_source_length=2_0 , max_target_length=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase_ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' ) lowerCAmelCase : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) lowerCAmelCase : List[str] = tmp_dir.joinpath('''train.source''' ).open().readlines() lowerCAmelCase : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(UpperCamelCase_ , UpperCamelCase_ , 1_2_8 , UpperCamelCase_ ) lowerCAmelCase : Any = {x.name for x in tmp_dir.iterdir()} lowerCAmelCase : Optional[int] = {x.name for x in save_dir.iterdir()} lowerCAmelCase : Tuple = save_dir.joinpath('''train.source''' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(UpperCamelCase_ ) < len(UpperCamelCase_ ) assert len(UpperCamelCase_ ) == 1 assert len(packed_examples[0] ) == sum(len(UpperCamelCase_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' ) def lowerCamelCase__ ( self : Tuple ): if not FAIRSEQ_AVAILABLE: return lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[str] = self._get_dataset(max_len=6_4 ) lowerCAmelCase : Any = 6_4 lowerCAmelCase : Dict = ds.make_dynamic_sampler(UpperCamelCase_ , required_batch_size_multiple=UpperCamelCase_ ) lowerCAmelCase : List[Any] = [len(UpperCamelCase_ ) for x in batch_sampler] assert len(set(UpperCamelCase_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(UpperCamelCase_ ) == len(UpperCamelCase_ ) # no 
dropped or added examples lowerCAmelCase : Any = DataLoader(UpperCamelCase_ , batch_sampler=UpperCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase : Optional[int] = [] lowerCAmelCase : List[str] = [] for batch in data_loader: lowerCAmelCase : Union[str, Any] = batch['''input_ids'''].shape lowerCAmelCase : Optional[Any] = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple lowerCAmelCase : Optional[int] = np.product(batch['''input_ids'''].shape ) num_src_per_batch.append(UpperCamelCase_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(UpperCamelCase_ ) assert num_src_per_batch[0] == max(UpperCamelCase_ ) if failures: raise AssertionError(F'''too many tokens in {len(UpperCamelCase_ )} batches''' ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_dataset(max_len=5_1_2 ) lowerCAmelCase : Tuple = 2 lowerCAmelCase : List[str] = ds.make_sortish_sampler(UpperCamelCase_ , shuffle=UpperCamelCase_ ) lowerCAmelCase : List[Any] = DataLoader(UpperCamelCase_ , batch_size=UpperCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 ) lowerCAmelCase : Tuple = DataLoader(UpperCamelCase_ , batch_size=UpperCamelCase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.pad_token_id def count_pad_tokens(UpperCamelCase_ : Dict , UpperCamelCase_ : List[str]="input_ids" ): return [batch[k].eq(UpperCamelCase_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(UpperCamelCase_ , k='''labels''' ) ) < sum(count_pad_tokens(UpperCamelCase_ , k='''labels''' ) ) assert sum(count_pad_tokens(UpperCamelCase_ ) ) < sum(count_pad_tokens(UpperCamelCase_ ) ) assert len(UpperCamelCase_ ) == len(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : List[str]=1_0_0_0 , UpperCamelCase_ : str=1_2_8 ): if os.getenv('''USE_REAL_DATA''' , UpperCamelCase_ ): lowerCAmelCase : List[Any] = '''examples/seq2seq/wmt_en_ro''' lowerCAmelCase : Any = max_len * 2 * 6_4 if not Path(UpperCamelCase_ ).joinpath('''train.len''' ).exists(): save_len_file(UpperCamelCase_ , UpperCamelCase_ ) else: lowerCAmelCase : List[str] = '''examples/seq2seq/test_data/wmt_en_ro''' lowerCAmelCase : Union[str, Any] = max_len * 4 save_len_file(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = AutoTokenizer.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : str = SeqaSeqDataset( UpperCamelCase_ , data_dir=UpperCamelCase_ , type_path='''train''' , max_source_length=UpperCamelCase_ , max_target_length=UpperCamelCase_ , n_obs=UpperCamelCase_ , ) return ds, max_tokens, tokenizer def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = self._get_dataset() lowerCAmelCase : List[Any] = set(DistributedSortishSampler(UpperCamelCase_ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase_ ) ) lowerCAmelCase : int = set(DistributedSortishSampler(UpperCamelCase_ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase_ ) ) assert idsa.intersection(UpperCamelCase_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ ) if tok_name == MBART_TINY: lowerCAmelCase : Any = SeqaSeqDataset( UpperCamelCase_ , 
data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , ) lowerCAmelCase : Any = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: lowerCAmelCase : Tuple = SeqaSeqDataset( UpperCamelCase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , ) lowerCAmelCase : Tuple = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(UpperCamelCase_ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase_ ) == 0
637
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict snake_case__ : Optional[Any] = namedtuple( '''_TestCommandArgs''', [ '''dataset''', '''name''', '''cache_dir''', '''data_dir''', '''all_configs''', '''save_infos''', '''ignore_verifications''', '''force_redownload''', '''clear_cache''', ], defaults=[None, None, None, False, False, False, False, False], ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ): return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def _snake_case ( _snake_case : Any ): lowerCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case ) lowerCAmelCase : str = TestCommand(*_snake_case ) test_command.run() lowerCAmelCase : str = os.path.join(_snake_case , '''README.md''' ) assert os.path.exists(_snake_case ) lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(_snake_case ) lowerCAmelCase : List[str] = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = getattr(dataset_infos['''default'''] , _snake_case ), getattr(expected_dataset_infos['''default'''] , _snake_case ) if key == "num_bytes": assert is_apercent_close(_snake_case , _snake_case ) elif key == "splits": assert list(_snake_case ) == list(_snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
637
1
"""simple docstring""" import argparse import os import re import packaging.version snake_case__ : Any = '''examples/''' snake_case__ : int = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } snake_case__ : Optional[Any] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } snake_case__ : Dict = '''README.md''' def _snake_case ( _snake_case : Tuple , _snake_case : str , _snake_case : Dict ): with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : Tuple = f.read() lowerCAmelCase, lowerCAmelCase : Union[str, Any] = REPLACE_PATTERNS[pattern] lowerCAmelCase : Tuple = replace.replace('''VERSION''' , _snake_case ) lowerCAmelCase : Union[str, Any] = re_pattern.sub(_snake_case , _snake_case ) with open(_snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(_snake_case ) def _snake_case ( _snake_case : Optional[Any] ): for folder, directories, fnames in os.walk(_snake_case ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(_snake_case , _snake_case ) , _snake_case , pattern='''examples''' ) def _snake_case ( _snake_case : List[str] , _snake_case : List[Any]=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_snake_case , _snake_case , _snake_case ) if not patch: update_version_in_examples(_snake_case ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' lowerCAmelCase : Tuple = '''1. Want to contribute a new model?''' with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase : List[str] = f.readlines() # Find the start of the list. lowerCAmelCase : List[Any] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCAmelCase : Any = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): lowerCAmelCase : Any = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(_snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(_snake_case ) def _snake_case ( ): with open(REPLACE_FILES['''init'''] , '''r''' ) as f: lowerCAmelCase : int = f.read() lowerCAmelCase : Tuple = REPLACE_PATTERNS['''init'''][0].search(_snake_case ).groups()[0] return packaging.version.parse(_snake_case ) def _snake_case ( _snake_case : List[Any]=False ): lowerCAmelCase : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: lowerCAmelCase : str = default_version.base_version elif patch: lowerCAmelCase : Dict = f'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: lowerCAmelCase : Optional[int] = f'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. lowerCAmelCase : Union[str, Any] = input(f'''Which version are you releasing? [{default_version}]''' ) if len(_snake_case ) == 0: lowerCAmelCase : List[Any] = default_version print(f'''Updating version to {version}.''' ) global_version_update(_snake_case , patch=_snake_case ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def _snake_case ( ): lowerCAmelCase : str = get_version() lowerCAmelCase : Dict = f'''{current_version.major}.{current_version.minor + 1}.0.dev0''' lowerCAmelCase : Optional[Any] = current_version.base_version # Check with the user we got that right. lowerCAmelCase : Tuple = input(f'''Which version are we developing now? [{dev_version}]''' ) if len(_snake_case ) == 0: lowerCAmelCase : str = dev_version print(f'''Updating version to {version}.''' ) global_version_update(_snake_case ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": snake_case__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') snake_case__ : Dict = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
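A minimal sketch of how one of the REPLACE_PATTERNS entries above rewrites a version string (the sample input is made up for illustration):

import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
sample = '__version__ = "4.27.0.dev0"'
print(pattern.sub('__version__ = "4.27.0"', sample))
# -> __version__ = "4.27.0"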
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ): return base * power(_snake_case , (exponent - 1) ) if exponent else 1 if __name__ == "__main__": print('''Raise base to the power of exponent using recursion...''') snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip()) snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip()) snake_case__ : Any = power(base, abs(exponent)) if exponent < 0: # power() does not properly deal w/ negative exponents snake_case__ : Dict = 1 / result print(f"""{base} to the power of {exponent} is {result}""")
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer snake_case__ : Optional[int] = logging.get_logger(__name__) snake_case__ : int = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : List[str] = { '''vocab_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json''' ), }, '''merges_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''', '''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''', '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt''' ), }, '''tokenizer_file''': { '''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''', '''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''', '''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''', '''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''', '''roberta-base-openai-detector''': ( '''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json''' ), '''roberta-large-openai-detector''': ( '''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json''' ), }, } snake_case__ : Tuple = { '''roberta-base''': 512, '''roberta-large''': 512, '''roberta-large-mnli''': 512, '''distilroberta-base''': 512, '''roberta-base-openai-detector''': 512, '''roberta-large-openai-detector''': 512, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = RobertaTokenizer def __init__( self : Any , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : Optional[int]="replace" , UpperCamelCase_ : Optional[int]="<s>" , UpperCamelCase_ : Optional[int]="</s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : Dict="<s>" , UpperCamelCase_ : Optional[Any]="<unk>" , UpperCamelCase_ : Any="<pad>" , UpperCamelCase_ : List[Any]="<mask>" , UpperCamelCase_ : Any=False , UpperCamelCase_ : List[str]=True , **UpperCamelCase_ : int , ): super().__init__( 
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space: lowerCAmelCase : Tuple = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) ) lowerCAmelCase : Union[str, Any] = add_prefix_space lowerCAmelCase : Optional[Any] = pre_tok_class(**UpperCamelCase_ ) lowerCAmelCase : int = add_prefix_space lowerCAmelCase : Any = '''post_processor''' lowerCAmelCase : str = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ ) if tokenizer_component_instance: lowerCAmelCase : int = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase : Any = tuple(state['''sep'''] ) if "cls" in state: lowerCAmelCase : Optional[Any] = tuple(state['''cls'''] ) lowerCAmelCase : List[Any] = False if state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space: lowerCAmelCase : Optional[int] = add_prefix_space lowerCAmelCase : List[str] = True if state.get('''trim_offsets''' , UpperCamelCase_ ) != trim_offsets: lowerCAmelCase : Tuple = trim_offsets lowerCAmelCase : List[Any] = True if changes_to_apply: lowerCAmelCase : Any = getattr(UpperCamelCase_ , state.pop('''type''' ) ) lowerCAmelCase : Tuple = component_class(**UpperCamelCase_ ) setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ ) @property def lowerCamelCase__ ( self : List[Any] ): if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[Any] ): lowerCAmelCase : Dict = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value lowerCAmelCase : Optional[Any] = value def lowerCamelCase__ ( self : List[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : int ): lowerCAmelCase : Union[str, Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : int , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Union[str, Any] ): lowerCAmelCase : Any = kwargs.get('''is_split_into_words''' , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." 
) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : Any = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : List[str] = [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
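As a sanity check on the two methods above: for RoBERTa, `build_inputs_with_special_tokens` wraps a single sequence as `<s> A </s>` and a pair as `<s> A </s></s> B </s>`, and `create_token_type_ids_from_sequences` returns all zeros either way. A small sketch with RoBERTa's default special-token ids (0 for `<s>`, 2 for `</s>`; the content ids are illustrative):

bos, eos = 0, 2
token_ids_a = [31414, 232]  # illustrative content ids
token_ids_b = [1185]

single = [bos] + token_ids_a + [eos]
pair = single + [eos] + token_ids_b + [eos]
assert single == [0, 31414, 232, 2]
assert pair == [0, 31414, 232, 2, 2, 1185, 2]  # token type ids: all zero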
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : int = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ): if attention_mask is None: lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class snake_case_: def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ): lowerCAmelCase : Tuple = parent lowerCAmelCase : str = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : Optional[int] = is_training lowerCAmelCase : int = use_labels lowerCAmelCase : List[Any] = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : List[Any] = num_hidden_layers lowerCAmelCase : Any = num_attention_heads lowerCAmelCase : List[Any] = intermediate_size lowerCAmelCase : Optional[int] = hidden_act lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : Optional[int] = attention_probs_dropout_prob lowerCAmelCase : List[Any] = max_position_embeddings lowerCAmelCase : Union[str, Any] = eos_token_id lowerCAmelCase : Dict = pad_token_id lowerCAmelCase : Optional[Any] = bos_token_id lowerCAmelCase : List[str] = initializer_range def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) 
, -1 ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Union[str, Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def lowerCamelCase__ ( self : str ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ): lowerCAmelCase : int = 2_0 lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCAmelCase : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : List[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : List[str] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = 2_0 lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) 
lowerCAmelCase : Dict = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ ) lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class snake_case_( unittest.TestCase ): __UpperCamelCase = 99 def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase : List[Any] = input_ids.shape[0] lowerCAmelCase : Optional[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data() lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ ) lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ) lowerCAmelCase : str = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) 
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class snake_case_( a__ , unittest.TestCase , a__ ): __UpperCamelCase = True __UpperCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Any = FlaxBlenderbotModelTester(self ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) @jax.jit def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ): return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCAmelCase : List[Any] = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): return model.decode( decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Optional[int] ): for model_class_name in self.all_model_classes: lowerCAmelCase : Optional[int] = 
model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase : List[str] = model(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5} lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) lowerCAmelCase : List[Any] = ['''Sam'''] lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' ) lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.''' lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ ) assert generated_txt[0].strip() == tgt_text
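The `shift_tokens_right(input_ids, 1, 2)` calls in the tests above prepend the decoder start token (2) and drop the last token of each row, so for rows that end in padding one pad token (1) disappears per row, which is what the `n_pad_before - 1` assertion checks. A minimal numpy sketch of that behaviour (the tests themselves use the JAX implementation imported above):

import numpy as np


def shift_right(input_ids: np.ndarray, pad_token_id: int, start_token_id: int) -> np.ndarray:
    shifted = np.full_like(input_ids, pad_token_id)
    shifted[:, 0] = start_token_id       # decoder start token first
    shifted[:, 1:] = input_ids[:, :-1]   # everything else moves one slot right
    return shifted


ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_right(ids, pad_token_id=1, start_token_id=2))
# [[ 2 71 82 18 33  2  1]]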
"""simple docstring""" def _snake_case ( _snake_case : int ): if length <= 0 or not isinstance(_snake_case , _snake_case ): raise ValueError('''Length must be a positive integer.''' ) return [n * (2 * n - 1) for n in range(_snake_case )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example snake_case__ : int = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def _snake_case ( _snake_case : list[list[int]] ): lowerCAmelCase : Union[str, Any] = [] for i in range(len(_snake_case ) ): lowerCAmelCase : Any = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_snake_case ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_snake_case ) - 1: neighbour_count += cells[i + 1][j] if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCAmelCase : str = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_snake_case ) return next_generation def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ): lowerCAmelCase : int = [] for _ in range(_snake_case ): # Create output image lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) ) lowerCAmelCase : Union[str, Any] = img.load() # Save cells to image for x in range(len(_snake_case ) ): for y in range(len(cells[0] ) ): lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255 lowerCAmelCase : List[Any] = (colour, colour, colour) # Save image images.append(_snake_case ) lowerCAmelCase : Union[str, Any] = new_generation(_snake_case ) return images if __name__ == "__main__": snake_case__ : Union[str, Any] = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_( a__ ): __UpperCamelCase = '''philschmid/bart-large-cnn-samsum''' __UpperCamelCase = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) __UpperCamelCase = '''summarizer''' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = ['''text'''] __UpperCamelCase = ['''text'''] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ): return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ): return self.model.generate(**UpperCamelCase_ )[0] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ): return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
"""simple docstring""" from __future__ import annotations class snake_case_: def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self : Dict ): # searches pattern in text and returns index positions lowerCAmelCase : Union[str, Any] = [] for i in range(self.textLen - self.patLen + 1 ): lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ ) if mismatch_index == -1: positions.append(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] ) lowerCAmelCase : int = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions snake_case__ : str = '''ABAABA''' snake_case__ : List[str] = '''AB''' snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern) snake_case__ : Optional[Any] = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
"""simple docstring""" import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin snake_case__ : Optional[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''') snake_case__ : List[str] = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') snake_case__ : List[str] = '''pt''' if is_torch_available() else '''tf''' @require_sentencepiece @require_tokenizers class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = CamembertTokenizer __UpperCamelCase = CamembertTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def lowerCamelCase__ ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase : Optional[Any] = CamembertTokenizer(UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : int = '''<pad>''' lowerCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(UpperCamelCase_ ) , 1_0_0_4 ) def lowerCamelCase__ ( self : List[str] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Dict = CamembertTokenizer(UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) lowerCAmelCase : str = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) lowerCAmelCase : List[Any] = '''I was born in 92000, and this is falsé.''' lowerCAmelCase : Dict = tokenizer.encode(UpperCamelCase_ ) lowerCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : List[str] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) # <unk> tokens are not the same for `rust` than for `slow`. 
# Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) lowerCAmelCase : Dict = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): if not self.test_rust_tokenizer: return lowerCAmelCase : Tuple = self.get_tokenizer() lowerCAmelCase : List[str] = self.get_rust_tokenizer() lowerCAmelCase : Dict = '''I was born in 92000, and this is falsé.''' lowerCAmelCase : Any = tokenizer.tokenize(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer() lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ ) lowerCAmelCase : Dict = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : List[str] ): # fmt: off lowerCAmelCase : Dict = {'''input_ids''': [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. lowerCAmelCase : Any = [ '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ''' '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''', '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ''' '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ''' '''telles que la traduction et la synthèse de texte.''', ] self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=UpperCamelCase_ , )
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case_( a__ ): pass class snake_case_: def __init__( self : Any , UpperCamelCase_ : Any ): lowerCAmelCase : Any = data lowerCAmelCase : Node | None = None def __iter__( self : int ): lowerCAmelCase : Any = self lowerCAmelCase : Union[str, Any] = [] while node: if node in visited: raise ContainsLoopError visited.append(UpperCamelCase_ ) yield node.data lowerCAmelCase : Optional[int] = node.next_node @property def lowerCamelCase__ ( self : str ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": snake_case__ : Dict = Node(1) snake_case__ : Any = Node(2) snake_case__ : int = Node(3) snake_case__ : Any = Node(4) print(root_node.has_loop) # False snake_case__ : Tuple = root_node.next_node print(root_node.has_loop) # True snake_case__ : List[Any] = Node(5) snake_case__ : int = Node(6) snake_case__ : List[Any] = Node(5) snake_case__ : Dict = Node(6) print(root_node.has_loop) # False snake_case__ : Any = Node(1) print(root_node.has_loop) # False
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : str = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[int] = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys snake_case__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from torch import nn class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ): super().__init__() lowerCAmelCase : str = class_size lowerCAmelCase : Dict = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowerCAmelCase : int = self.mlp(UpperCamelCase_ ) return logits
"""simple docstring""" def _snake_case ( _snake_case : list ): if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] lowerCAmelCase : Union[str, Any] = grid[0] for row_n in range(1 , len(_snake_case ) ): lowerCAmelCase : Any = grid[row_n] lowerCAmelCase : int = fill_row(_snake_case , _snake_case ) lowerCAmelCase : int = grid[row_n] return grid[-1][-1] def _snake_case ( _snake_case : list , _snake_case : list ): current_row[0] += row_above[0] for cell_n in range(1 , len(_snake_case ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" class snake_case_: def __init__( self : Union[str, Any] , UpperCamelCase_ : str ): lowerCAmelCase : Dict = val lowerCAmelCase : str = None lowerCAmelCase : Dict = None def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ): if self.val: if val < self.val: if self.left is None: lowerCAmelCase : int = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowerCAmelCase : Any = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = val def _snake_case ( _snake_case : Tuple , _snake_case : str ): # Recursive traversal if root: inorder(root.left , _snake_case ) res.append(root.val ) inorder(root.right , _snake_case ) def _snake_case ( _snake_case : Optional[Any] ): # Build BST if len(_snake_case ) == 0: return arr lowerCAmelCase : Optional[Any] = Node(arr[0] ) for i in range(1 , len(_snake_case ) ): root.insert(arr[i] ) # Traverse BST in order. lowerCAmelCase : Optional[int] = [] inorder(_snake_case , _snake_case ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
"""simple docstring""" from ..utils import DummyObject, requires_backends class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Any , *UpperCamelCase_ : str , **UpperCamelCase_ : Dict ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Any ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Optional[Any] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Any ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : List[str] , *UpperCamelCase_ : int , **UpperCamelCase_ : Dict ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : List[Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Any ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Dict , *UpperCamelCase_ : str , **UpperCamelCase_ : Union[str, Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Any , *UpperCamelCase_ : Any , **UpperCamelCase_ : Optional[Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Any , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Dict , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Optional[Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Dict ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Tuple , *UpperCamelCase_ : Any , **UpperCamelCase_ : str ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Optional[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Optional[int] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Optional[Any] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Tuple ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : str , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Union[str, Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : List[Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : int ): requires_backends(self , ['''sentencepiece'''] 
) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Dict , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : Union[str, Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Tuple , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Any ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : List[str] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Tuple ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Dict , *UpperCamelCase_ : str , **UpperCamelCase_ : str ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : int ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : str , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : int ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : int , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Any , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Tuple ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : int , *UpperCamelCase_ : str , **UpperCamelCase_ : Optional[Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : int , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Any ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Optional[Any] , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Optional[Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Optional[int] , *UpperCamelCase_ : List[str] , **UpperCamelCase_ : Union[str, Any] ): requires_backends(self , ['''sentencepiece'''] ) class snake_case_( metaclass=a__ ): __UpperCamelCase = ['''sentencepiece'''] def __init__( self : Dict , *UpperCamelCase_ : Dict , **UpperCamelCase_ : Union[str, Any] ): requires_backends(self , ['''sentencepiece'''] )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : int = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case_( a__ ): __UpperCamelCase = '''levit''' def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Tuple = image_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = kernel_size lowerCAmelCase : Dict = stride lowerCAmelCase : List[Any] = padding lowerCAmelCase : Dict = hidden_sizes lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Tuple = depths lowerCAmelCase : Dict = key_dim lowerCAmelCase : Union[str, Any] = drop_path_rate lowerCAmelCase : List[Any] = patch_size lowerCAmelCase : Tuple = attention_ratio lowerCAmelCase : Optional[int] = mlp_ratio lowerCAmelCase : Union[str, Any] = initializer_range lowerCAmelCase : List[str] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : Tuple ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase__ ( self : Optional[Any] ): return 1E-4
"""simple docstring""" def _snake_case ( _snake_case : int ): if not isinstance(_snake_case , _snake_case ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) lowerCAmelCase : Optional[int] = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): lowerCAmelCase : str = 3 lowerCAmelCase : Tuple = 2_5_0 lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) lowerCAmelCase : Union[str, Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Dict = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : str ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case_: def __init__( self : Union[str, Any] , UpperCamelCase_ : int ): lowerCAmelCase : Optional[int] = num_of_nodes lowerCAmelCase : list[list[int]] = [] lowerCAmelCase : dict[int, int] = {} def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ): self.m_edges.append([u_node, v_node, weight] ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ): if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ): if self.m_component[u_node] != u_node: for k in self.m_component: lowerCAmelCase : List[str] = self.find_component(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : list[int] , UpperCamelCase_ : int , UpperCamelCase_ : int ): if component_size[u_node] <= component_size[v_node]: lowerCAmelCase : Any = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCamelCase_ ) elif component_size[u_node] >= component_size[v_node]: lowerCAmelCase : Union[str, Any] = self.find_component(UpperCamelCase_ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[int] = [] lowerCAmelCase : Tuple = 0 lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) lowerCAmelCase : Any = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[Any] = edge lowerCAmelCase : Union[str, Any] = self.m_component[u] lowerCAmelCase : Optional[Any] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): lowerCAmelCase : str = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : List[str] = edge lowerCAmelCase : int = self.m_component[u] lowerCAmelCase : Any = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) print(F'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' ) num_of_components -= 1 lowerCAmelCase : Dict = [-1] * self.m_num_of_nodes print(F'''The total weight of the minimal spanning tree is: {mst_weight}''' ) def _snake_case ( ): pass if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = None def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case : List[Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowerCAmelCase : List[Any] = [] for i in range(_snake_case ): lowerCAmelCase : int = i / num_diffusion_timesteps lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) ) return torch.tensor(_snake_case , dtype=torch.floataa ) class snake_case_( a__ , a__ ): @register_to_config def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ ) lowerCAmelCase : str = 1.0 - self.betas lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) lowerCAmelCase : Tuple = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowerCAmelCase : Any = 1.0 # setable values lowerCAmelCase : Any = None lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() ) lowerCAmelCase : List[str] = variance_type def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ): return sample def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ): lowerCAmelCase : Any = num_inference_steps lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ): if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : int = self.alphas_cumprod[t] lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : Dict = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : Tuple = self.betas[t] else: lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add 
variance to pred_sample lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowerCAmelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) ) lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowerCAmelCase : Optional[Any] = variance.log() lowerCAmelCase : Union[str, Any] = beta.log() lowerCAmelCase : Dict = (predicted_variance + 1) / 2 lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 ) else: lowerCAmelCase : Optional[int] = None # 1. compute alphas, betas if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t] lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : int = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : List[Any] = self.betas[t] lowerCAmelCase : Optional[int] = self.alphas[t] else: lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev lowerCAmelCase : Dict = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowerCAmelCase : Tuple = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowerCAmelCase : Dict = torch.clamp( UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCAmelCase : int = 0 if t > 0: lowerCAmelCase : Union[str, Any] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device ) lowerCAmelCase : Any = self._get_variance( UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , ) if self.variance_type == "fixed_small_log": lowerCAmelCase : str = variance elif self.variance_type == "learned_range": lowerCAmelCase : Optional[Any] = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) lowerCAmelCase : List[Any] = variance * variance_noise lowerCAmelCase : int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowerCAmelCase : int = timesteps.to(original_samples.device ) lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase : str = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
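# Minimal usage sketch (not from the listing above): the scheduler defined here
# mirrors diffusers' UnCLIPScheduler, so a hypothetical denoising loop against
# the upstream API looks like the following; the random `model_output` is a
# stand-in for a real denoiser network.
import torch
from diffusers import UnCLIPScheduler

scheduler = UnCLIPScheduler(num_train_timesteps=1000, variance_type="fixed_small_log")
scheduler.set_timesteps(25)

sample = torch.randn(1, 4, 8, 8)  # start from pure Gaussian noise
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample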
"""simple docstring""" import numpy as np def _snake_case ( _snake_case : np.ndarray ): return 1 / (1 + np.exp(-vector )) def _snake_case ( _snake_case : np.ndarray ): return vector * sigmoid(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class snake_case_: def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ): lowerCAmelCase : Any = parent lowerCAmelCase : Any = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : str = is_training lowerCAmelCase : List[Any] = use_input_mask lowerCAmelCase : Optional[int] = use_token_type_ids lowerCAmelCase : Union[str, Any] = use_labels lowerCAmelCase : List[str] = vocab_size lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : int = num_hidden_layers lowerCAmelCase : Union[str, Any] = num_attention_heads lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : List[Any] = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : Tuple = attention_probs_dropout_prob lowerCAmelCase : Optional[Any] = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Tuple = type_sequence_label_size lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : str = num_labels lowerCAmelCase : Optional[int] = num_choices lowerCAmelCase : Tuple = scope def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : Tuple = None if self.use_input_mask: lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : List[str] = None if self.use_token_type_ids: lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : int = None lowerCAmelCase : int = None lowerCAmelCase : Tuple = None if self.use_labels: lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Tuple ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = True lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , ) lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ): lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : str = True lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # first forward pass lowerCAmelCase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 
) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] lowerCAmelCase : str = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] # select random slice lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Tuple = config_and_inputs lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , a__ , unittest.TestCase ): __UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = LlamaModelTester(self ) lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase : str = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : List[str] = 3 lowerCAmelCase : List[str] = input_dict['''input_ids'''] lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : 
int = '''single_label_classification''' lowerCAmelCase : Tuple = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : Dict = '''multi_label_classification''' lowerCAmelCase : Union[str, Any] = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def lowerCamelCase__ ( self : Optional[Any] ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size ) lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ ) original_model.to(UpperCamelCase_ ) original_model.eval() lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0} lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ ) scaled_model.to(UpperCamelCase_ ) scaled_model.eval() lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) @require_torch class snake_case_( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, 
-2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # fmt: off lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Model is curently gated''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' lowerCAmelCase : int = '''Simply put, the theory of relativity states that ''' lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ ) # greedy generation outputs lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
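# Minimal smoke test mirroring the tiny configuration exercised by the tester
# above; shapes only, no pretrained weights, assuming the public transformers API.
import torch
from transformers import LlamaConfig, LlamaForCausalLM

config = LlamaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = LlamaForCausalLM(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))
with torch.no_grad():
    logits = model(input_ids).logits
assert logits.shape == (2, 7, config.vocab_size)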
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version snake_case__ : Optional[int] = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') snake_case__ : Union[str, Any] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) snake_case__ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class snake_case_: __UpperCamelCase = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) __UpperCamelCase = field(default=a__ , metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCamelCase = field(default=a__ , metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCamelCase = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCamelCase = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) __UpperCamelCase = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) __UpperCamelCase = field( default=a__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) __UpperCamelCase = field( default=a__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Tuple = {} if self.train_dir is not None: lowerCAmelCase : List[Any] = self.train_dir if self.validation_dir is not None: lowerCAmelCase : Tuple = self.validation_dir lowerCAmelCase : Dict = data_files if data_files else None @dataclass class snake_case_: __UpperCamelCase = field( default=a__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(a__ )} , ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) __UpperCamelCase = field( default=a__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) __UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) __UpperCamelCase = field(default=a__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCamelCase = field( default=a__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) __UpperCamelCase = field( default=a__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) __UpperCamelCase = field( default=a__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) __UpperCamelCase = field( default=a__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class snake_case_: def __init__( self : Dict , UpperCamelCase_ : List[Any]=1_9_2 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Optional[int]=0.6 ): lowerCAmelCase : Optional[int] = input_size lowerCAmelCase : List[Any] = mask_patch_size lowerCAmelCase : Any = model_patch_size lowerCAmelCase : Any = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) lowerCAmelCase : int = self.input_size // self.mask_patch_size lowerCAmelCase : List[Any] = self.mask_patch_size // self.model_patch_size lowerCAmelCase : Tuple = self.rand_size**2 lowerCAmelCase : Optional[Any] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : str ): lowerCAmelCase : Any = np.random.permutation(self.token_count )[: self.mask_count] lowerCAmelCase : Optional[Any] = np.zeros(self.token_count , dtype=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = 1 lowerCAmelCase : Optional[int] = mask.reshape((self.rand_size, self.rand_size) ) lowerCAmelCase : List[str] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def _snake_case ( _snake_case : str ): lowerCAmelCase : int = torch.stack([example['''pixel_values'''] for example in examples] ) lowerCAmelCase : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def _snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
lowerCAmelCase : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : int = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , _snake_case , _snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowerCAmelCase : List[str] = training_args.get_process_log_level() logger.setLevel(_snake_case ) transformers.utils.logging.set_verbosity(_snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(f'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowerCAmelCase : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowerCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. lowerCAmelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowerCAmelCase : Optional[int] = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _snake_case ) and data_args.train_val_split > 0.0: lowerCAmelCase : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) lowerCAmelCase : List[str] = split['''train'''] lowerCAmelCase : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
lowerCAmelCase : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name_or_path , **_snake_case ) elif model_args.model_name_or_path: lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: lowerCAmelCase : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(f'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(f'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_snake_case , '''decoder_type''' ): lowerCAmelCase : Tuple = '''simmim''' # adapt config lowerCAmelCase : List[str] = model_args.image_size if model_args.image_size is not None else config.image_size lowerCAmelCase : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size lowerCAmelCase : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_snake_case ) elif model_args.model_name_or_path: lowerCAmelCase : Optional[int] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_snake_case ) else: lowerCAmelCase : List[str] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } lowerCAmelCase : List[Any] = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: lowerCAmelCase : Any = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) lowerCAmelCase : int = AutoModelForMaskedImageModeling.from_config(_snake_case ) if training_args.do_train: lowerCAmelCase : Union[str, Any] = ds['''train'''].column_names else: lowerCAmelCase : str = ds['''validation'''].column_names if data_args.image_column_name is not None: lowerCAmelCase : Dict = data_args.image_column_name elif "image" in column_names: lowerCAmelCase : Union[str, Any] = '''image''' elif "img" in column_names: lowerCAmelCase : Optional[int] = '''img''' else: lowerCAmelCase : str = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py lowerCAmelCase : str = Compose( [ Lambda(lambda _snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator lowerCAmelCase : Union[str, Any] = MaskGenerator( 
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_snake_case : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = [transforms(_snake_case ) for image in examples[image_column_name]] lowerCAmelCase : Optional[int] = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: lowerCAmelCase : Optional[int] = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: lowerCAmelCase : Dict = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_snake_case ) # Initialize our trainer lowerCAmelCase : Any = Trainer( model=_snake_case , args=_snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=_snake_case , data_collator=_snake_case , ) # Training if training_args.do_train: lowerCAmelCase : List[Any] = None if training_args.resume_from_checkpoint is not None: lowerCAmelCase : Any = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowerCAmelCase : Dict = last_checkpoint lowerCAmelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=_snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowerCAmelCase : Optional[int] = trainer.evaluate() trainer.log_metrics('''eval''' , _snake_case ) trainer.save_metrics('''eval''' , _snake_case ) # Write model card and (optionally) push to hub lowerCAmelCase : List[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**_snake_case ) else: trainer.create_model_card(**_snake_case ) if __name__ == "__main__": main()
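# Standalone sketch of the mask generator defined above (it appears as
# `snake_case_` in this transformed listing but is constructed as MaskGenerator
# in main), assuming the upstream SimMIM behavior. With input_size=192,
# mask_patch_size=32, model_patch_size=4, mask_ratio=0.6:
#   rand_size  = 192 // 32 = 6  -> 36 coarse patches, ceil(36 * 0.6) = 22 masked
#   scale      = 32 // 4   = 8  -> each coarse patch covers 8 x 8 model patches
#   mask shape = (6 * 8) ** 2 = 2304 flattened tokens, 22 * 64 = 1408 of them set
mask = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)()
assert mask.shape == (2304,)
assert int(mask.sum()) == 1408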
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ): lowerCAmelCase : Dict = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ): lowerCAmelCase : Optional[int] = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _snake_case ) lowerCAmelCase : List[Any] = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Any = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , ) for _ in range(1_0_0_0 ): lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class snake_case_( unittest.TestCase ): __UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None __UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __UpperCamelCase = 10 def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase : Optional[Any] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' ) class snake_case_: def __init__( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = fn def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ): return self.fn(*UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
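# Minimal sketch of the warmup-then-linear-decay schedule the tests above
# exercise; the resulting learning rates match the
# `get_linear_schedule_with_warmup` row in the table ([0.0, 5.0, 10.0, 8.75, ...]).
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])
    optimizer.step()
    scheduler.step()
# lrs ramps 0.0 -> 10.0 over the 2 warmup steps, then decays linearly toward 0.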
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: snake_case__ : Optional[int] = None snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : Dict = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Optional[int] = { '''vocab_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''', }, '''tokenizer_file''': { '''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''', }, } snake_case__ : Any = { '''google/rembert''': 256, } snake_case__ : List[str] = '''▁''' class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = RemBertTokenizer def __init__( self : List[str] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : str=None , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : Optional[Any]="[CLS]" , UpperCamelCase_ : Dict="[SEP]" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : str="[SEP]" , UpperCamelCase_ : Optional[Any]="<pad>" , UpperCamelCase_ : List[str]="[CLS]" , UpperCamelCase_ : Optional[int]="[MASK]" , **UpperCamelCase_ : Dict , ): # Mask token behave like a normal word, i.e. include the space before it lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , remove_space=UpperCamelCase_ , keep_accents=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = do_lower_case lowerCAmelCase : Any = remove_space lowerCAmelCase : Optional[Any] = keep_accents lowerCAmelCase : Any = vocab_file lowerCAmelCase : Optional[Any] = False if not self.vocab_file else True def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : int = [self.sep_token_id] lowerCAmelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase 
: List[str] = [self.sep_token_id] lowerCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error('''Vocabulary path ({}) should be a directory'''.format(UpperCamelCase_ ) ) return lowerCAmelCase : str = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
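# Sketch of the special-token layout produced by the fast tokenizer above,
# assuming the google/rembert checkpoint is reachable.
from transformers import RemBertTokenizerFast

tok = RemBertTokenizerFast.from_pretrained("google/rembert")
pair = tok.build_inputs_with_special_tokens([5, 6], [7, 8])
# single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP]
assert pair == [tok.cls_token_id, 5, 6, tok.sep_token_id, 7, 8, tok.sep_token_id]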
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_( a__ ): __UpperCamelCase = '''philschmid/bart-large-cnn-samsum''' __UpperCamelCase = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) __UpperCamelCase = '''summarizer''' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = ['''text'''] __UpperCamelCase = ['''text'''] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ): return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ): return self.model.generate(**UpperCamelCase_ )[0] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ): return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor snake_case__ : List[str] = logging.get_logger(__name__) class snake_case_( a__ ): def __init__( self : Union[str, Any] , *UpperCamelCase_ : Any , **UpperCamelCase_ : Tuple ): warnings.warn( '''The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DeiTImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
"""simple docstring""" snake_case__ : List[Any] = '''Tobias Carryer''' from time import time class snake_case_: def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008 lowerCAmelCase : str = multiplier lowerCAmelCase : Optional[int] = increment lowerCAmelCase : Optional[Any] = modulo lowerCAmelCase : Optional[Any] = seed def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31) while True: print(lcg.next_number())
"""simple docstring""" snake_case__ : Any = { '''Pillow''': '''Pillow''', '''accelerate''': '''accelerate>=0.11.0''', '''compel''': '''compel==0.1.8''', '''black''': '''black~=23.1''', '''datasets''': '''datasets''', '''filelock''': '''filelock''', '''flax''': '''flax>=0.4.1''', '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''', '''huggingface-hub''': '''huggingface-hub>=0.13.2''', '''requests-mock''': '''requests-mock==1.10.0''', '''importlib_metadata''': '''importlib_metadata''', '''invisible-watermark''': '''invisible-watermark''', '''isort''': '''isort>=5.5.4''', '''jax''': '''jax>=0.2.8,!=0.3.2''', '''jaxlib''': '''jaxlib>=0.1.65''', '''Jinja2''': '''Jinja2''', '''k-diffusion''': '''k-diffusion>=0.0.12''', '''torchsde''': '''torchsde''', '''note_seq''': '''note_seq''', '''librosa''': '''librosa''', '''numpy''': '''numpy''', '''omegaconf''': '''omegaconf''', '''parameterized''': '''parameterized''', '''protobuf''': '''protobuf>=3.20.3,<4''', '''pytest''': '''pytest''', '''pytest-timeout''': '''pytest-timeout''', '''pytest-xdist''': '''pytest-xdist''', '''ruff''': '''ruff>=0.0.241''', '''safetensors''': '''safetensors''', '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''', '''scipy''': '''scipy''', '''onnx''': '''onnx''', '''regex''': '''regex!=2019.12.17''', '''requests''': '''requests''', '''tensorboard''': '''tensorboard''', '''torch''': '''torch>=1.4''', '''torchvision''': '''torchvision''', '''transformers''': '''transformers>=4.25.1''', '''urllib3''': '''urllib3<=2.0.0''', }
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: snake_case__ : Optional[Any] = None snake_case__ : Union[str, Any] = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Any = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } snake_case__ : int = { '''google/bigbird-roberta-base''': 4_096, '''google/bigbird-roberta-large''': 4_096, '''google/bigbird-base-trivia-itc''': 4_096, } snake_case__ : Optional[Any] = '''▁''' class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BigBirdTokenizer __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = [] def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = vocab_file lowerCAmelCase : Optional[int] = False if not self.vocab_file else True def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : str = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Tuple = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
637
1
"""simple docstring""" from sklearn.metrics import matthews_corrcoef import datasets snake_case__ : List[Any] = ''' Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] ''' snake_case__ : Union[str, Any] = ''' Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results[\'matthews_correlation\'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results[\'matthews_correlation\'], 2)) -0.25 ''' snake_case__ : Tuple = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case_( datasets.Metric ): def lowerCamelCase__ ( self : List[str] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html''' ] , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any=None ): return { "matthews_correlation": float(matthews_corrcoef(UpperCamelCase_ , UpperCamelCase_ , sample_weight=UpperCamelCase_ ) ), }
637
"""simple docstring""" # using dfs for finding eulerian path traversal def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ): lowerCAmelCase : Any = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) return path def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ): lowerCAmelCase : Tuple = 0 lowerCAmelCase : Optional[Any] = -1 for i in range(_snake_case ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 lowerCAmelCase : Optional[Any] = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ): lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case ) if check == 3: print('''graph is not Eulerian''' ) print('''no path''' ) return lowerCAmelCase : Dict = 1 if check == 2: lowerCAmelCase : int = odd_node print('''graph has a Euler path''' ) if check == 1: print('''graph has a Euler cycle''' ) lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case ) print(_snake_case ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]} lowerCAmelCase : Any = { 1: [], 2: [] # all degree is zero } lowerCAmelCase : List[str] = 10 check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) if __name__ == "__main__": main()
637
1
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin snake_case__ : Dict = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece @require_tokenizers class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = XLMRobertaTokenizer __UpperCamelCase = XLMRobertaTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def lowerCamelCase__ ( self : Dict ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase : Dict = XLMRobertaTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Any = '''<pad>''' lowerCAmelCase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-1] , '''<mask>''' ) self.assertEqual(len(UpperCamelCase_ ) , 1_0_0_2 ) def lowerCamelCase__ ( self : Tuple ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = XLMRobertaTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowerCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowerCamelCase__ ( self : List[str] ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs 
between slow and fast versions return lowerCAmelCase : List[str] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-xlm-roberta''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tempfile.mkdtemp() lowerCAmelCase : Union[str, Any] = tokenizer_r.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it saves with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCAmelCase : Tuple = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase : Any = tokenizer_r.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=True lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp() lowerCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) lowerCAmelCase : str = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it saves with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[Any] = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=False lowerCAmelCase : Dict = tempfile.mkdtemp() lowerCAmelCase : Any = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase : str = tokenizer_r.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) @cached_property def lowerCamelCase__ ( self : List[str] ): return XLMRobertaTokenizer.from_pretrained('''xlm-roberta-base''' ) def lowerCamelCase__ ( self : Optional[Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase_ , f.name ) lowerCAmelCase : List[str] = XLMRobertaTokenizer(f.name , keep_accents=UpperCamelCase_ ) lowerCAmelCase : Dict = pickle.dumps(UpperCamelCase_ ) 
pickle.loads(UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): if not self.test_rust_tokenizer: return lowerCAmelCase : Dict = self.get_tokenizer() lowerCAmelCase : Union[str, Any] = self.get_rust_tokenizer() lowerCAmelCase : Optional[Any] = '''I was born in 92000, and this is falsé.''' lowerCAmelCase : Dict = tokenizer.tokenize(UpperCamelCase_ ) lowerCAmelCase : List[str] = rust_tokenizer.tokenize(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Dict = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = self.get_rust_tokenizer() lowerCAmelCase : Dict = tokenizer.encode(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = rust_tokenizer.encode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) @slow def lowerCamelCase__ ( self : str ): lowerCAmelCase : Dict = '''Hello World!''' lowerCAmelCase : Tuple = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[Any] = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) lowerCAmelCase : Tuple = [ 0, 3_2_9_3, 8_3, 1_0, 4_5_5_2, 4_9_8_9, 7_9_8_6, 6_7_8, 1_0, 5_9_1_5, 1_1_1, 1_7_9_4_5_9, 1_2_4_8_5_0, 4, 6_0_4_4, 2_3_7, 1_2, 6, 5, 6, 4, 6_7_8_0, 7_0_5, 1_5, 1_3_8_8, 4_4, 3_7_8, 1_0_1_1_4, 7_1_1, 1_5_2, 2_0, 6, 5, 2_2_3_7_6, 6_4_2, 1_2_2_1, 1_5_1_9_0, 3_4_1_5_3, 4_5_0, 5_6_0_8, 9_5_9, 1_1_1_9, 5_7_7_0_2, 1_3_6, 1_8_6, 4_7, 1_0_9_8, 2_9_3_6_7, 4_7, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_0_4_4, 2_3_7, 6_2_8_4, 5_0_9_0_1, 5_2_8, 3_1, 9_0, 3_4, 9_2_7, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) ) @slow def lowerCamelCase__ ( self : Any ): # fmt: off lowerCAmelCase : List[str] = {'''input_ids''': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 
4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase_ , model_name='''xlm-roberta-base''' , revision='''d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3''' , )
637
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[Any] = 0 @slow def lowerCamelCase__ ( self : Dict ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) # Check that tokenizer_type ≠ model_type lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Any ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , 
'''merges.txt''' ) ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) ) lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): with pytest.raises(UpperCamelCase_ ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowerCamelCase__ ( self : str ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ ) else: self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowerCamelCase__ ( self : Tuple ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values() lowerCAmelCase : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Any ): self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = '''Hello, world. 
How are you?''' lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowerCamelCase__ ( self : int ): lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): # Check we can load the tokenizer config of an online model. lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' ) lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ ) self.assertDictEqual(UpperCamelCase_ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowerCamelCase__ ( self : Optional[int] ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowerCamelCase__ ( self : str ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) # Can register in two steps AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ ) bert_tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): class snake_case_( a__ ): __UpperCamelCase = False class snake_case_( a__ ): __UpperCamelCase = NewTokenizer __UpperCamelCase = False try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # If remote code is not set, the default is to use local lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowerCamelCase__ ( self : str ): with self.assertRaisesRegex( UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' ) def lowerCamelCase__ ( self : int ): with self.assertRaisesRegex( UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' ) def lowerCamelCase__ ( self : Optional[int] ): # Make sure we have cached the tokenizer. lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
637
1
"""simple docstring""" def _snake_case ( _snake_case : int ): if not isinstance(_snake_case , _snake_case ): raise TypeError('''only integers accepted as input''' ) else: lowerCAmelCase : Tuple = str(abs(_snake_case ) ) lowerCAmelCase : List[Any] = [list(_snake_case ) for char in range(len(_snake_case ) )] for index in range(len(_snake_case ) ): num_transpositions[index].pop(_snake_case ) return max( int(''''''.join(list(_snake_case ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
637
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } snake_case__ : List[Any] = { '''allenai/led-base-16384''': 16_384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _snake_case ( ): lowerCAmelCase : Optional[int] = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowerCAmelCase : str = bs[:] lowerCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(_snake_case ) cs.append(2**8 + n ) n += 1 lowerCAmelCase : int = [chr(_snake_case ) for n in cs] return dict(zip(_snake_case , _snake_case ) ) def _snake_case ( _snake_case : List[Any] ): lowerCAmelCase : List[str] = set() lowerCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase : Optional[Any] = char return pairs class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ): lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a 
normal word, i.e. include the space before it lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: lowerCAmelCase : Any = json.load(UpperCamelCase_ ) lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()} lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding lowerCAmelCase : List[Any] = bytes_to_unicode() lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1] lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCAmelCase : List[Any] = {} lowerCAmelCase : Optional[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase__ ( self : Union[str, Any] ): return len(self.encoder ) def lowerCamelCase__ ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ): if token in self.cache: return self.cache[token] lowerCAmelCase : List[str] = tuple(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase, lowerCAmelCase : Any = bigram lowerCAmelCase : Tuple = [] lowerCAmelCase : Any = 0 while i < len(UpperCamelCase_ ): try: lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase : int = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase : Tuple = tuple(UpperCamelCase_ ) lowerCAmelCase : Tuple = new_word if len(UpperCamelCase_ ) == 1: break else: lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ ) lowerCAmelCase : List[str] = word return word def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Dict = [] for token in re.findall(self.pat , UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) ) return bpe_tokens def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ): return 
self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ): return self.decoder.get(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : int = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) lowerCAmelCase : Optional[int] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCAmelCase : Tuple = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] lowerCAmelCase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ): lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): lowerCAmelCase : List[Any] = ''' ''' + text return (text, kwargs) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ 
: Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Dict = super()._pad( encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ) # Load from model defaults if return_attention_mask is None: lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ ) if needs_to_be_padded: lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase : Dict = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase : int = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
637
1
"""simple docstring""" import math def _snake_case ( _snake_case : int ): if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_snake_case ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( _snake_case : float = 0.1 ): lowerCAmelCase : Optional[Any] = 3 lowerCAmelCase : Any = 3 while primes / (2 * j - 1) >= ratio: for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ): primes += is_prime(_snake_case ) j += 2 return j if __name__ == "__main__": import doctest doctest.testmod()
637
"""simple docstring""" def _snake_case ( _snake_case : int = 4000000 ): lowerCAmelCase : int = [0, 1] lowerCAmelCase : List[str] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 lowerCAmelCase : int = 0 for j in range(len(_snake_case ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f"""{solution() = }""")
637
1
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _snake_case ( _snake_case : Any ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X2_0000 and cp <= 0X2_a6df) # or (cp >= 0X2_a700 and cp <= 0X2_b73f) # or (cp >= 0X2_b740 and cp <= 0X2_b81f) # or (cp >= 0X2_b820 and cp <= 0X2_ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2_f800 and cp <= 0X2_fa1f) # ): # return True return False def _snake_case ( _snake_case : str ): # word like '180' or '身高' or '神' for char in word: lowerCAmelCase : str = ord(_snake_case ) if not _is_chinese_char(_snake_case ): return 0 return 1 def _snake_case ( _snake_case : List[str] ): lowerCAmelCase : Tuple = set() for token in tokens: lowerCAmelCase : Tuple = len(_snake_case ) > 1 and is_chinese(_snake_case ) if chinese_word: word_set.add(_snake_case ) lowerCAmelCase : str = list(_snake_case ) return word_list def _snake_case ( _snake_case : List[str] , _snake_case : set() ): if not chinese_word_set: return bert_tokens lowerCAmelCase : int = max([len(_snake_case ) for w in chinese_word_set] ) lowerCAmelCase : Union[str, Any] = bert_tokens lowerCAmelCase, lowerCAmelCase : Any = 0, len(_snake_case ) while start < end: lowerCAmelCase : List[Any] = True if is_chinese(bert_word[start] ): lowerCAmelCase : List[Any] = min(end - start , _snake_case ) for i in range(_snake_case , 1 , -1 ): lowerCAmelCase : Any = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): lowerCAmelCase : str = '''##''' + bert_word[j] lowerCAmelCase : Tuple = start + i lowerCAmelCase : Optional[Any] = False break if single_word: start += 1 return bert_word def _snake_case ( _snake_case : List[str] , _snake_case : LTP , _snake_case : BertTokenizer ): lowerCAmelCase : Union[str, Any] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : Optional[int] = ltp_tokenizer.seg(lines[i : i + 100] )[0] lowerCAmelCase : str = [get_chinese_word(_snake_case ) for r in res] ltp_res.extend(_snake_case ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Optional[int] = [] for i in range(0 , len(_snake_case ) , 100 ): lowerCAmelCase : int = bert_tokenizer(lines[i : i + 100] , add_special_tokens=_snake_case , truncation=_snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(_snake_case ) == len(_snake_case ) lowerCAmelCase : Tuple = [] for input_ids, chinese_word in zip(_snake_case , _snake_case ): lowerCAmelCase : int = [] for id in input_ids: lowerCAmelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(_snake_case ) input_tokens.append(_snake_case ) lowerCAmelCase : int = add_sub_symbol(_snake_case , _snake_case ) lowerCAmelCase : Union[str, Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_snake_case ): if token[:2] == "##": lowerCAmelCase : int = token[2:] # save chinese tokens' pos if len(_snake_case ) == 1 and _is_chinese_char(ord(_snake_case ) ): ref_id.append(_snake_case ) ref_ids.append(_snake_case ) assert len(_snake_case ) == len(_snake_case ) return ref_ids def _snake_case ( _snake_case : Union[str, Any] ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: lowerCAmelCase : Optional[Any] = f.readlines() lowerCAmelCase : List[str] = [line.strip() for line in data if len(_snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' lowerCAmelCase : str = LTP(args.ltp ) # faster in GPU device lowerCAmelCase : Any = BertTokenizer.from_pretrained(args.bert ) lowerCAmelCase : Dict = prepare_ref(_snake_case , _snake_case , _snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: lowerCAmelCase : Union[str, Any] = [json.dumps(_snake_case ) + '''\n''' for ref in ref_ids] f.writelines(_snake_case ) if __name__ == "__main__": snake_case__ : Any = argparse.ArgumentParser(description='''prepare_chinese_ref''') parser.add_argument( '''--file_name''', type=str, default='''./resources/chinese-demo.txt''', help='''file need process, same as training data in lm''', ) parser.add_argument( '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path''' ) parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''') parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''') snake_case__ : Any = parser.parse_args() main(args)
637
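# A tiny illustration of what `add_sub_symbol` above produces: given BERT tokens and
# the LTP-segmented Chinese words of the same sentence, every non-initial character
# of a segmented whole word gets the "##" prefix so it can be masked as one unit
# during whole-word masking. The token and word values here are illustrative only.
tokens = ["他", "的", "身", "高", "是", "180"]
print(add_sub_symbol(tokens, {"身高"}))
# -> ['他', '的', '身', '##高', '是', '180']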
"""simple docstring""" def _snake_case ( _snake_case : float , _snake_case : list[float] ): if discount_rate < 0: raise ValueError('''Discount rate cannot be negative''' ) if not cash_flows: raise ValueError('''Cash flows list cannot be empty''' ) lowerCAmelCase : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) ) return round(_snake_case , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
637
1
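# A quick worked example for the present-value helper above (values chosen for
# illustration): an initial outlay of 1000 followed by three inflows of 500,
# discounted at 10% per period.
#   NPV = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3
#       = -1000 + 454.55 + 413.22 + 375.66 ≈ 243.43
cash_flows = [-1000.0, 500.0, 500.0, 500.0]
npv = sum(cf / (1 + 0.10) ** i for i, cf in enumerate(cash_flows))
print(round(npv, 2))  # 243.43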
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] ) @pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] ) @pytest.mark.parametrize('''revision''' , [None, '''v2'''] ) def _snake_case ( _snake_case : int , _snake_case : List[Any] , _snake_case : Union[str, Any] ): lowerCAmelCase : int = hf_hub_url(repo_id=_snake_case , path=_snake_case , revision=_snake_case ) assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(_snake_case )}'''
637
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : list[int] , _snake_case : int ): if len(_snake_case ) == 0: return False lowerCAmelCase : List[Any] = len(_snake_case ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , _snake_case ) else: return binary_search(a_list[midpoint + 1 :] , _snake_case ) if __name__ == "__main__": snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip() snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')] snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip()) snake_case__ : str = '''''' if binary_search(sequence, target) else '''not ''' print(f"""{target} was {not_str}found in {sequence}""")
637
1
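# The recursive version above only answers membership and copies a slice of the
# list on every call. A common alternative, sketched here, is an iterative search
# over indices that also reports *where* the item sits, or -1 if it is absent.
def binary_search_index(a_list: list, item) -> int:
    low, high = 0, len(a_list) - 1
    while low <= high:
        mid = (low + high) // 2
        if a_list[mid] == item:
            return mid
        if item < a_list[mid]:
            high = mid - 1  # search the left half
        else:
            low = mid + 1  # search the right half
    return -1


assert binary_search_index([1, 3, 5, 7, 9], 7) == 3
assert binary_search_index([1, 3, 5, 7, 9], 4) == -1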
"""simple docstring""" import os # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_doctest_list.py snake_case__ : List[str] = '''.''' if __name__ == "__main__": snake_case__ : Any = os.path.join(REPO_PATH, '''utils/documentation_tests.txt''') snake_case__ : Dict = [] snake_case__ : Tuple = [] with open(doctest_file_path) as fp: for line in fp: snake_case__ : Optional[int] = line.strip() snake_case__ : Union[str, Any] = os.path.join(REPO_PATH, line) if not (os.path.isfile(path) or os.path.isdir(path)): non_existent_paths.append(line) all_paths.append(path) if len(non_existent_paths) > 0: snake_case__ : str = '''\n'''.join(non_existent_paths) raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""") if all_paths != sorted(all_paths): raise ValueError('''Files in `utils/documentation_tests.txt` are not in alphabetical order.''')
637
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict snake_case__ : Optional[Any] = namedtuple( '''_TestCommandArgs''', [ '''dataset''', '''name''', '''cache_dir''', '''data_dir''', '''all_configs''', '''save_infos''', '''ignore_verifications''', '''force_redownload''', '''clear_cache''', ], defaults=[None, None, None, False, False, False, False, False], ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ): return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def _snake_case ( _snake_case : Any ): lowerCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case ) lowerCAmelCase : str = TestCommand(*_snake_case ) test_command.run() lowerCAmelCase : str = os.path.join(_snake_case , '''README.md''' ) assert os.path.exists(_snake_case ) lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(_snake_case ) lowerCAmelCase : List[str] = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = getattr(dataset_infos['''default'''] , _snake_case ), getattr(expected_dataset_infos['''default'''] , _snake_case ) if key == "num_bytes": assert is_apercent_close(_snake_case , _snake_case ) elif key == "splits": assert list(_snake_case ) == list(_snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
637
1
"""simple docstring""" snake_case__ : Dict = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] snake_case__ : Dict = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] snake_case__ : Any = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] snake_case__ : Optional[int] = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] snake_case__ : Dict = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] snake_case__ : Union[str, Any] = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] snake_case__ : List[Any] = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] snake_case__ : Optional[int] = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
637
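# The hard-coded lists above look like precomputed inference-timestep schedules for
# a 1000-step diffusion model, enumerated from highest to lowest noise level. As a
# sketch only — this is an assumption about how such tables can be produced, not a
# statement about how these particular ones were — an evenly spaced subset can be
# derived like this:
import numpy as np


def spaced_timesteps(num_train_timesteps: int = 1000, num_inference_steps: int = 27) -> list:
    steps = np.linspace(0, num_train_timesteps - 1, num_inference_steps).round().astype(int)
    return sorted(set(steps.tolist()), reverse=True)  # descending, deduplicated


print(spaced_timesteps(1000, 27))  # e.g. [999, 961, 922, ..., 0]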
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ): return base * power(_snake_case , (exponent - 1) ) if exponent else 1 if __name__ == "__main__": print('''Raise base to the power of exponent using recursion...''') snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip()) snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip()) snake_case__ : Any = power(base, abs(exponent)) if exponent < 0: # power() does not properly deal w/ negative exponents snake_case__ : Dict = 1 / result print(f"""{base} to the power of {exponent} is {result}""")
637
1
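# The recursion above makes `exponent` calls and can hit Python's recursion limit.
# A standard improvement, sketched here, is binary (fast) exponentiation, which
# squares the base and halves the exponent for O(log exponent) multiplications.
def fast_power(base: int, exponent: int) -> int:
    result = 1
    while exponent > 0:
        if exponent & 1:  # odd exponent: fold one factor of base into the result
            result *= base
        base *= base
        exponent >>= 1
    return result


assert fast_power(2, 10) == 1024
assert fast_power(3, 0) == 1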
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class snake_case_( a__ , a__ , a__ , unittest.TestCase ): __UpperCamelCase = StableDiffusionControlNetImgaImgPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) __UpperCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCamelCase__ ( self : Tuple ): torch.manual_seed(0 ) lowerCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) torch.manual_seed(0 ) lowerCAmelCase : int = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) torch.manual_seed(0 ) lowerCAmelCase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) lowerCAmelCase : Optional[int] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) lowerCAmelCase : Optional[int] = CLIPTextModel(UpperCamelCase_ ) lowerCAmelCase : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCAmelCase : List[str] = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple=0 ): if str(UpperCamelCase_ ).startswith('''mps''' ): lowerCAmelCase : Any = torch.manual_seed(UpperCamelCase_ ) else: lowerCAmelCase : 
Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowerCAmelCase : Tuple = 2 lowerCAmelCase : Union[str, Any] = randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=UpperCamelCase_ , device=torch.device(UpperCamelCase_ ) , ) lowerCAmelCase : Optional[Any] = floats_tensor(control_image.shape , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase : Tuple = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((6_4, 6_4) ) lowerCAmelCase : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def lowerCamelCase__ ( self : List[str] ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def lowerCamelCase__ ( self : List[str] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def lowerCamelCase__ ( self : List[str] ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) class snake_case_( a__ , a__ , unittest.TestCase ): __UpperCamelCase = StableDiffusionControlNetImgaImgPipeline __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} __UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS __UpperCamelCase = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def lowerCamelCase__ ( self : List[str] ): torch.manual_seed(0 ) lowerCAmelCase : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , ) torch.manual_seed(0 ) def init_weights(UpperCamelCase_ : Optional[int] ): if isinstance(UpperCamelCase_ , torch.nn.Convad ): torch.nn.init.normal(m.weight ) m.bias.data.fill_(1.0 ) lowerCAmelCase : Optional[int] = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) controlneta.controlnet_down_blocks.apply(UpperCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase : Optional[int] = ControlNetModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , ) controlneta.controlnet_down_blocks.apply(UpperCamelCase_ ) torch.manual_seed(0 ) lowerCAmelCase : int = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) lowerCAmelCase : str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCAmelCase : str = 
CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) lowerCAmelCase : Optional[Any] = CLIPTextModel(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCAmelCase : Tuple = MultiControlNetModel([controlneta, controlneta] ) lowerCAmelCase : Dict = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Union[str, Any]=0 ): if str(UpperCamelCase_ ).startswith('''mps''' ): lowerCAmelCase : int = torch.manual_seed(UpperCamelCase_ ) else: lowerCAmelCase : Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) lowerCAmelCase : List[Any] = 2 lowerCAmelCase : List[Any] = [ randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=UpperCamelCase_ , device=torch.device(UpperCamelCase_ ) , ), randn_tensor( (1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=UpperCamelCase_ , device=torch.device(UpperCamelCase_ ) , ), ] lowerCAmelCase : Optional[int] = floats_tensor(control_image[0].shape , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('''RGB''' ).resize((6_4, 6_4) ) lowerCAmelCase : int = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = self.get_dummy_components() lowerCAmelCase : Any = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) lowerCAmelCase : Tuple = 10.0 lowerCAmelCase : Any = 4 lowerCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : List[str] = steps lowerCAmelCase : Union[str, Any] = scale lowerCAmelCase : Tuple = pipe(**UpperCamelCase_ )[0] lowerCAmelCase : List[Any] = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : Any = steps lowerCAmelCase : Tuple = scale lowerCAmelCase : Tuple = pipe(**UpperCamelCase_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0] lowerCAmelCase : Any = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = steps lowerCAmelCase : int = scale lowerCAmelCase : List[str] = pipe(**UpperCamelCase_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0] lowerCAmelCase : Dict = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = steps lowerCAmelCase : List[Any] = scale lowerCAmelCase : Optional[int] = pipe(**UpperCamelCase_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 assert np.sum(np.abs(output_a - output_a ) ) > 1E-3 def lowerCamelCase__ ( self : str ): return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 ) @unittest.skipIf( torch_device != 
'''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def lowerCamelCase__ ( self : List[str] ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 ) def lowerCamelCase__ ( self : Optional[Any] ): self._test_inference_batch_single_identical(expected_max_diff=2E-3 ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = self.get_dummy_components() lowerCAmelCase : str = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(UpperCamelCase_ ) except NotImplementedError: pass @slow @require_torch_gpu class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[int] = ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''' ) lowerCAmelCase : str = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' , safety_checker=UpperCamelCase_ , controlnet=UpperCamelCase_ ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowerCAmelCase : int = '''evil space-punk bird''' lowerCAmelCase : Dict = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''' ).resize((5_1_2, 5_1_2) ) lowerCAmelCase : List[Any] = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''' ).resize((5_1_2, 5_1_2) ) lowerCAmelCase : List[Any] = pipe( UpperCamelCase_ , UpperCamelCase_ , control_image=UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''np''' , num_inference_steps=5_0 , strength=0.6 , ) lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) lowerCAmelCase : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''' ) assert np.abs(expected_image - image ).max() < 9E-2
637
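# A hedged usage sketch for the pipeline exercised by the tests above, mirroring
# the slow test's flow (model ids and settings are the ones that test uses). It is
# left commented out because it needs a CUDA GPU and downloaded weights; treat it
# as a minimal invocation, not a definitive recipe.
#
# import torch
# from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline
#
# controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
# pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None
# )
# pipe.enable_model_cpu_offload()
# image = pipe(
#     "evil space-punk bird",
#     image=init_image,           # a 512x512 PIL image to transform
#     control_image=canny_image,  # a 512x512 canny edge map
#     num_inference_steps=50,
#     strength=0.6,
#     generator=torch.Generator("cpu").manual_seed(0),
# ).images[0]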
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : int = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ): if attention_mask is None: lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class snake_case_: def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ): lowerCAmelCase : Tuple = parent lowerCAmelCase : str = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : Optional[int] = is_training lowerCAmelCase : int = use_labels lowerCAmelCase : List[Any] = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : List[Any] = num_hidden_layers lowerCAmelCase : Any = num_attention_heads lowerCAmelCase : List[Any] = intermediate_size lowerCAmelCase : Optional[int] = hidden_act lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : Optional[int] = attention_probs_dropout_prob lowerCAmelCase : List[Any] = max_position_embeddings lowerCAmelCase : Union[str, Any] = eos_token_id lowerCAmelCase : Dict = pad_token_id lowerCAmelCase : Optional[Any] = bos_token_id lowerCAmelCase : List[str] = initializer_range def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) 
, -1 ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Union[str, Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def lowerCamelCase__ ( self : str ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ): lowerCAmelCase : int = 2_0 lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCAmelCase : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : List[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : List[str] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = 2_0 lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) 
lowerCAmelCase : Dict = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ ) lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class snake_case_( unittest.TestCase ): __UpperCamelCase = 99 def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase : List[Any] = input_ids.shape[0] lowerCAmelCase : Optional[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data() lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ ) lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ) lowerCAmelCase : str = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) 
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class snake_case_( a__ , unittest.TestCase , a__ ): __UpperCamelCase = True __UpperCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Any = FlaxBlenderbotModelTester(self ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) @jax.jit def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ): return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCAmelCase : List[Any] = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): return model.decode( decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Optional[int] ): for model_class_name in self.all_model_classes: lowerCAmelCase : Optional[int] = 
model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase : List[str] = model(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5} lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) lowerCAmelCase : List[Any] = ['''Sam'''] lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' ) lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.''' lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ ) assert generated_txt[0].strip() == tgt_text
637
1
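# A minimal numpy sketch of the `shift_tokens_right` behaviour the tests above rely
# on: the decoder input is the target sequence shifted one position to the right,
# with the decoder-start token placed at position 0 (so the shifted sequence has
# exactly one pad token fewer, as the test asserts).
import numpy as np


def shift_tokens_right_np(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # masked label positions (-100), if any, become pad tokens
    return np.where(shifted == -100, pad_token_id, shifted)


# e.g. the first row from the test, with pad_token_id=1, decoder_start_token_id=2:
print(shift_tokens_right_np(np.array([[71, 82, 18, 33, 2, 1, 1]]), 1, 2))
# -> [[ 2 71 82 18 33  2  1]]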
"""simple docstring""" from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class snake_case_( a__ ): __UpperCamelCase = ['''vqvae'''] def __init__( self : str , UpperCamelCase_ : AutoencoderKL , UpperCamelCase_ : UNetaDConditionModel , UpperCamelCase_ : Mel , UpperCamelCase_ : Union[DDIMScheduler, DDPMScheduler] , ): super().__init__() self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , mel=UpperCamelCase_ , vqvae=UpperCamelCase_ ) def lowerCamelCase__ ( self : str ): return 5_0 if isinstance(self.scheduler , UpperCamelCase_ ) else 1_0_0_0 @torch.no_grad() def __call__( self : str , UpperCamelCase_ : int = 1 , UpperCamelCase_ : str = None , UpperCamelCase_ : np.ndarray = None , UpperCamelCase_ : int = 0 , UpperCamelCase_ : int = 0 , UpperCamelCase_ : int = None , UpperCamelCase_ : torch.Generator = None , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 0 , UpperCamelCase_ : torch.Generator = None , UpperCamelCase_ : float = 0 , UpperCamelCase_ : torch.Tensor = None , UpperCamelCase_ : torch.Tensor = None , UpperCamelCase_ : Tuple=True , ): lowerCAmelCase : Any = steps or self.get_default_steps() self.scheduler.set_timesteps(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: lowerCAmelCase : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: lowerCAmelCase : Optional[Any] = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=UpperCamelCase_ , device=self.device , ) lowerCAmelCase : Tuple = noise lowerCAmelCase : Tuple = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = self.mel.audio_slice_to_image(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = np.frombuffer(input_image.tobytes() , dtype='''uint8''' ).reshape( (input_image.height, input_image.width) ) lowerCAmelCase : int = (input_image / 2_5_5) * 2 - 1 lowerCAmelCase : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: lowerCAmelCase : Tuple = self.vqvae.encode(torch.unsqueeze(UpperCamelCase_ , 0 ) ).latent_dist.sample( generator=UpperCamelCase_ )[0] lowerCAmelCase : Any = self.vqvae.config.scaling_factor * input_images if start_step > 0: lowerCAmelCase : Optional[int] = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , self.scheduler.timesteps[start_step - 1] ) lowerCAmelCase : str = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) lowerCAmelCase : List[str] = int(mask_start_secs * pixels_per_second ) lowerCAmelCase : Dict = int(mask_end_secs * pixels_per_second ) lowerCAmelCase : List[Any] = self.scheduler.add_noise(UpperCamelCase_ , UpperCamelCase_ , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , UpperCamelCase_ ): lowerCAmelCase : Dict = self.unet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )['''sample'''] 
else: lowerCAmelCase : Optional[Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ )['''sample'''] if isinstance(self.scheduler , UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = self.scheduler.step( model_output=UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , eta=UpperCamelCase_ , generator=UpperCamelCase_ , )['''prev_sample'''] else: lowerCAmelCase : str = self.scheduler.step( model_output=UpperCamelCase_ , timestep=UpperCamelCase_ , sample=UpperCamelCase_ , generator=UpperCamelCase_ , )['''prev_sample'''] if mask is not None: if mask_start > 0: lowerCAmelCase : Any = mask[:, step, :, :mask_start] if mask_end > 0: lowerCAmelCase : Tuple = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance lowerCAmelCase : Tuple = 1 / self.vqvae.config.scaling_factor * images lowerCAmelCase : Optional[int] = self.vqvae.decode(UpperCamelCase_ )['''sample'''] lowerCAmelCase : Tuple = (images / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy() lowerCAmelCase : Optional[int] = (images * 2_5_5).round().astype('''uint8''' ) lowerCAmelCase : List[str] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(UpperCamelCase_ , mode='''RGB''' ).convert('''L''' ) for _ in images) ) lowerCAmelCase : List[Any] = [self.mel.image_to_audio(UpperCamelCase_ ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(UpperCamelCase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(UpperCamelCase_ ) ) @torch.no_grad() def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[Image.Image] , UpperCamelCase_ : int = 5_0 ): assert isinstance(self.scheduler , UpperCamelCase_ ) self.scheduler.set_timesteps(UpperCamelCase_ ) lowerCAmelCase : str = np.array( [np.frombuffer(image.tobytes() , dtype='''uint8''' ).reshape((1, image.height, image.width) ) for image in images] ) lowerCAmelCase : int = (sample / 2_5_5) * 2 - 1 lowerCAmelCase : str = torch.Tensor(UpperCamelCase_ ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): lowerCAmelCase : str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps lowerCAmelCase : Optional[int] = self.scheduler.alphas_cumprod[t] lowerCAmelCase : str = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) lowerCAmelCase : List[Any] = 1 - alpha_prod_t lowerCAmelCase : Union[str, Any] = self.unet(UpperCamelCase_ , UpperCamelCase_ )['''sample'''] lowerCAmelCase : Any = (1 - alpha_prod_t_prev) ** 0.5 * model_output lowerCAmelCase : Optional[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) lowerCAmelCase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def lowerCamelCase__ ( UpperCamelCase_ : torch.Tensor , UpperCamelCase_ : torch.Tensor , UpperCamelCase_ : float ): lowerCAmelCase : Optional[int] = acos(torch.dot(torch.flatten(UpperCamelCase_ ) , torch.flatten(UpperCamelCase_ ) ) / torch.norm(UpperCamelCase_ ) / torch.norm(UpperCamelCase_ ) ) return sin((1 - alpha) * theta ) * xa / sin(UpperCamelCase_ ) + sin(alpha * theta ) * xa / sin(UpperCamelCase_ )
637
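# The pipeline above ends with a spherical linear interpolation (slerp) helper used
# to blend two noise tensors. A standalone numpy sketch of the same formula:
#   slerp(x0, x1, alpha) = sin((1 - alpha) * theta) / sin(theta) * x0
#                        + sin(alpha * theta) / sin(theta) * x1,
# where theta is the angle between the flattened tensors.
import numpy as np


def slerp(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(
        np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1))
    )
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)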
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example snake_case__ : int = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def _snake_case ( _snake_case : list[list[int]] ): lowerCAmelCase : Union[str, Any] = [] for i in range(len(_snake_case ) ): lowerCAmelCase : Any = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_snake_case ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_snake_case ) - 1: neighbour_count += cells[i + 1][j] if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCAmelCase : str = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_snake_case ) return next_generation def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ): lowerCAmelCase : int = [] for _ in range(_snake_case ): # Create output image lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) ) lowerCAmelCase : Union[str, Any] = img.load() # Save cells to image for x in range(len(_snake_case ) ): for y in range(len(cells[0] ) ): lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255 lowerCAmelCase : List[Any] = (colour, colour, colour) # Save image images.append(_snake_case ) lowerCAmelCase : Union[str, Any] = new_generation(_snake_case ) return images if __name__ == "__main__": snake_case__ : Union[str, Any] = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
637
1
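# A vectorized alternative to the per-cell loop above: count live neighbours for
# every cell at once with a 2-D convolution, then apply the survival/birth rules.
# This is an optional optimisation sketch (using scipy), not part of the original
# script.
import numpy as np
from scipy.signal import convolve2d


def new_generation_vectorized(cells: np.ndarray) -> np.ndarray:
    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])  # the 8-neighbourhood
    neighbours = convolve2d(cells, kernel, mode="same", boundary="fill", fillvalue=0)
    survive = (cells == 1) & ((neighbours == 2) | (neighbours == 3))
    born = (cells == 0) & (neighbours == 3)
    return (survive | born).astype(int)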
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices snake_case__ : Any = logging.get_logger(__name__) snake_case__ : int = { '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''', } class snake_case_( a__ , a__ ): __UpperCamelCase = '''convnextv2''' def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : Union[str, Any]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Union[str, Any]="gelu" , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : Union[str, Any]=1E-12 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : int=2_2_4 , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : Union[str, Any] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = num_channels lowerCAmelCase : List[Any] = patch_size lowerCAmelCase : List[Any] = num_stages lowerCAmelCase : Union[str, Any] = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes lowerCAmelCase : Any = [3, 3, 9, 3] if depths is None else depths lowerCAmelCase : int = hidden_act lowerCAmelCase : int = initializer_range lowerCAmelCase : Any = layer_norm_eps lowerCAmelCase : List[Any] = drop_path_rate lowerCAmelCase : Union[str, Any] = image_size lowerCAmelCase : Any = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase, lowerCAmelCase : Optional[Any] = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
637
"""simple docstring""" from __future__ import annotations class snake_case_: def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self : Dict ): # searches pattern in text and returns index positions lowerCAmelCase : Union[str, Any] = [] for i in range(self.textLen - self.patLen + 1 ): lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ ) if mismatch_index == -1: positions.append(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] ) lowerCAmelCase : int = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions snake_case__ : str = '''ABAABA''' snake_case__ : List[str] = '''AB''' snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern) snake_case__ : Optional[Any] = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
637
1
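# Note that reassigning the loop variable `i` in `bad_character_heuristic` above
# does not actually advance a `range`-based for loop, so that version degrades to a
# brute-force scan. Classic Boyer-Moore instead precomputes the rightmost occurrence
# of each pattern character once; a sketch of that table and the resulting shift rule
# (names here are illustrative):
def bad_character_table(pattern: str) -> dict:
    # rightmost index of each character in the pattern
    return {ch: i for i, ch in enumerate(pattern)}


def shift_on_mismatch(table: dict, mismatched_char: str, mismatch_pos_in_pattern: int) -> int:
    # shift so the mismatched text character lines up with its last occurrence
    # in the pattern, or jump past the pattern if it never occurs
    return max(1, mismatch_pos_in_pattern - table.get(mismatched_char, -1))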
"""simple docstring""" import json from typing import Dict, List, Optional, Tuple, Union from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_led import LEDTokenizer snake_case__ : Any = logging.get_logger(__name__) snake_case__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Optional[int] = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } snake_case__ : Any = { '''allenai/led-base-16384''': 16_384, } class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = LEDTokenizer __UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Union[str, Any]="replace" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Optional[Any]="<mask>" , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : List[str]=True , **UpperCamelCase_ : List[str] , ): super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space: lowerCAmelCase : List[Any] = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) ) lowerCAmelCase : Any = add_prefix_space lowerCAmelCase : int = pre_tok_class(**UpperCamelCase_ ) lowerCAmelCase : Dict = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowerCAmelCase : int = '''post_processor''' lowerCAmelCase : Union[str, Any] = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ ) if tokenizer_component_instance: lowerCAmelCase : Optional[int] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase : List[str] = tuple(state['''sep'''] ) if "cls" in state: lowerCAmelCase : Optional[int] = tuple(state['''cls'''] ) lowerCAmelCase : str = False if state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space: lowerCAmelCase : Optional[int] = add_prefix_space lowerCAmelCase : Tuple = True if state.get('''trim_offsets''' , UpperCamelCase_ ) != trim_offsets: lowerCAmelCase : List[str] = trim_offsets 
lowerCAmelCase : Optional[int] = True if changes_to_apply: lowerCAmelCase : Optional[int] = getattr(UpperCamelCase_ , state.pop('''type''' ) ) lowerCAmelCase : Optional[int] = component_class(**UpperCamelCase_ ) setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_ ) @property # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED def lowerCamelCase__ ( self : Optional[Any] ): if self._mask_token is None: if self.verbose: logger.error('''Using mask_token, but it is not set yet.''' ) return None return str(self._mask_token ) @mask_token.setter def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : int ): lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else value lowerCAmelCase : Optional[int] = value def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase_ : List[Any] , **UpperCamelCase_ : Dict ): lowerCAmelCase : Optional[Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple , *UpperCamelCase_ : Optional[int] , **UpperCamelCase_ : List[str] ): lowerCAmelCase : Union[str, Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' '''to use it with pretokenized inputs.''' ) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): lowerCAmelCase : str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple=None ): lowerCAmelCase : Optional[int] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : int = [self.sep_token_id] lowerCAmelCase : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : str = super()._pad( encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ) # Load from model defaults if return_attention_mask is None: lowerCAmelCase : List[Any] = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase : List[str] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have 
the same length as other (sequential) inputs. lowerCAmelCase : Union[str, Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ ) if needs_to_be_padded: lowerCAmelCase : Tuple = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase : Optional[Any] = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase : List[Any] = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy: ''' + str(self.padding_side ) ) return encoded_inputs
637
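A minimal usage sketch for the `_pad` override above, assuming the standard `transformers` API (the checkpoint name comes from the pretrained map at the top of the snippet); it shows `global_attention_mask` being padded with -1 alongside the other inputs:

from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["a short text", "a somewhat longer example text"])  # no padding yet
# Put global attention on the first token of every sequence:
enc["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in enc["input_ids"]]
padded = tokenizer.pad(enc, padding="longest")
print(padded["global_attention_mask"])  # the shorter row is padded with -1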
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case_( a__ ): pass class snake_case_: def __init__( self : Any , UpperCamelCase_ : Any ): lowerCAmelCase : Any = data lowerCAmelCase : Node | None = None def __iter__( self : int ): lowerCAmelCase : Any = self lowerCAmelCase : Union[str, Any] = [] while node: if node in visited: raise ContainsLoopError visited.append(UpperCamelCase_ ) yield node.data lowerCAmelCase : Optional[int] = node.next_node @property def lowerCamelCase__ ( self : str ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": snake_case__ : Dict = Node(1) snake_case__ : Any = Node(2) snake_case__ : int = Node(3) snake_case__ : Any = Node(4) print(root_node.has_loop) # False snake_case__ : Tuple = root_node.next_node print(root_node.has_loop) # True snake_case__ : List[Any] = Node(5) snake_case__ : int = Node(6) snake_case__ : List[Any] = Node(5) snake_case__ : Dict = Node(6) print(root_node.has_loop) # False snake_case__ : Any = Node(1) print(root_node.has_loop) # False
637
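The `has_loop` property above records every visited node in a list, which costs O(n^2) time and O(n) space. A constant-space alternative is Floyd's tortoise-and-hare, sketched here as a hypothetical standalone helper that assumes only the same `next_node` attribute:

def has_loop_floyd(head):
    # `fast` moves two nodes per step, `slow` one; the pointers can meet
    # again only if the list feeds back into itself.
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node
        fast = fast.next_node.next_node
        if slow is fast:
            return True
    return False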
1
"""simple docstring""" class snake_case_: def __init__( self : str , UpperCamelCase_ : List[str] ): # we need a list not a string, so do something to change the type lowerCAmelCase : Optional[Any] = arr.split(''',''' ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[Any] = [int(self.array[0] )] * len(self.array ) lowerCAmelCase : Dict = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): lowerCAmelCase : List[Any] = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) lowerCAmelCase : Union[str, Any] = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": snake_case__ : Dict = input('''please input some numbers:''') snake_case__ : Optional[Any] = SubArray(whole_array) snake_case__ : Any = array.solve_sub_array() print(('''the results is:''', re))
637
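`solve_sub_array` above is a dynamic-programming solution to the maximum-sum contiguous subarray problem. For comparison, a self-contained sketch of Kadane's algorithm, which computes the same answer in O(n) time and O(1) extra space:

def max_subarray_sum(values: list[int]) -> int:
    best = current = values[0]
    for x in values[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best

assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # subarray [4, -1, 2, 1]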
"""simple docstring""" from torch import nn class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ): super().__init__() lowerCAmelCase : str = class_size lowerCAmelCase : Dict = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowerCAmelCase : int = self.mlp(UpperCamelCase_ ) return logits
637
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case__ : Dict = logging.get_logger(__name__) class snake_case_( a__ ): __UpperCamelCase = ['''pixel_values'''] def __init__( self : Tuple , UpperCamelCase_ : bool = True , UpperCamelCase_ : Dict[str, int] = None , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : bool = True , UpperCamelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : bool = True , **UpperCamelCase_ : Any , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Optional[int] = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4} lowerCAmelCase : str = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowerCAmelCase : List[Any] = do_resize lowerCAmelCase : int = size lowerCAmelCase : Any = resample lowerCAmelCase : Optional[Any] = do_rescale lowerCAmelCase : Union[str, Any] = rescale_factor lowerCAmelCase : List[str] = do_normalize lowerCAmelCase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCAmelCase : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowerCAmelCase : str = do_convert_rgb def lowerCamelCase__ ( self : Any , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Dict[str, int] , UpperCamelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : Union[str, Any] , ): lowerCAmelCase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) lowerCAmelCase : Optional[int] = (size['''height'''], size['''width''']) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[int, float] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : List[str] , ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Union[float, List[float]] , UpperCamelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase_ : List[Any] , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : ImageInput , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Dict[str, int]] = None , UpperCamelCase_ : PILImageResampling = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[float] = None , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[float, List[float]]] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , UpperCamelCase_ : bool = None , UpperCamelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Optional[int] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase : Optional[Any] = resample if resample is not None else self.resample lowerCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean lowerCAmelCase : Optional[int] = image_std if image_std is not None else self.image_std lowerCAmelCase : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCAmelCase : str = size if size is not None else self.size lowerCAmelCase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) lowerCAmelCase : List[Any] = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCAmelCase : str = [convert_to_rgb(UpperCamelCase_ ) for image in images] # All transformations expect numpy arrays. 
lowerCAmelCase : List[str] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: lowerCAmelCase : str = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: lowerCAmelCase : List[str] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: lowerCAmelCase : List[Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] lowerCAmelCase : Dict = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] lowerCAmelCase : List[str] = BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCamelCase_ ) return encoded_outputs
637
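The processor above appears to mirror `transformers`' `BlipImageProcessor` (384x384 resize, CLIP mean/std, optional RGB conversion). A minimal end-to-end sketch with the upstream class, using a random image so it runs offline:

import numpy as np
from PIL import Image
from transformers import BlipImageProcessor

image = Image.fromarray(np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8))
processor = BlipImageProcessor()  # defaults match the __init__ above
outputs = processor(images=image, return_tensors="np")
print(outputs["pixel_values"].shape)  # -> (1, 3, 384, 384)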
"""simple docstring""" class snake_case_: def __init__( self : Union[str, Any] , UpperCamelCase_ : str ): lowerCAmelCase : Dict = val lowerCAmelCase : str = None lowerCAmelCase : Dict = None def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ): if self.val: if val < self.val: if self.left is None: lowerCAmelCase : int = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowerCAmelCase : Any = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = val def _snake_case ( _snake_case : Tuple , _snake_case : str ): # Recursive traversal if root: inorder(root.left , _snake_case ) res.append(root.val ) inorder(root.right , _snake_case ) def _snake_case ( _snake_case : Optional[Any] ): # Build BST if len(_snake_case ) == 0: return arr lowerCAmelCase : Optional[Any] = Node(arr[0] ) for i in range(1 , len(_snake_case ) ): root.insert(arr[i] ) # Traverse BST in order. lowerCAmelCase : Optional[int] = [] inorder(_snake_case , _snake_case ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
637
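Two behavioral notes on the `insert` above: an equal value overwrites the existing node's value instead of adding a node, so duplicates are silently dropped, and the `if self.val:` guard is falsy for a root value of 0, which skips insertion entirely. Already-sorted input also degrades the tree into a chain, making the sort O(n^2). A sketch of the duplicate effect, assuming the snippet's `tree_sort` entry point:

print(tree_sort([3, 1, 3, 2]))  # -> [1, 2, 3], not [1, 2, 3, 3]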
1
"""simple docstring""" def _snake_case ( _snake_case : float , _snake_case : list[float] ): if discount_rate < 0: raise ValueError('''Discount rate cannot be negative''' ) if not cash_flows: raise ValueError('''Cash flows list cannot be empty''' ) lowerCAmelCase : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) ) return round(_snake_case , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
637
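A worked example of the present-value sum computed above, written inline so it does not depend on the obfuscated function name: with a 10% discount rate, NPV(-1000, 500, 500, 500) = -1000 + 500/1.1 + 500/1.1^2 + 500/1.1^3, which is approximately 243.43.

flows = [-1000.0, 500.0, 500.0, 500.0]
rate = 0.10
npv = sum(cf / (1 + rate) ** i for i, cf in enumerate(flows))
print(round(npv, 2))  # -> 243.43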
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : int = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case_( a__ ): __UpperCamelCase = '''levit''' def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Tuple = image_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = kernel_size lowerCAmelCase : Dict = stride lowerCAmelCase : List[Any] = padding lowerCAmelCase : Dict = hidden_sizes lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Tuple = depths lowerCAmelCase : Dict = key_dim lowerCAmelCase : Union[str, Any] = drop_path_rate lowerCAmelCase : List[Any] = patch_size lowerCAmelCase : Tuple = attention_ratio lowerCAmelCase : Optional[int] = mlp_ratio lowerCAmelCase : Union[str, Any] = initializer_range lowerCAmelCase : List[str] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : Tuple ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase__ ( self : Optional[Any] ): return 1E-4
637
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) snake_case__ : Dict = { '''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''], '''tokenization_roberta''': ['''RobertaTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[Any] = ['''RobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[int] = [ '''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RobertaForCausalLM''', '''RobertaForMaskedLM''', '''RobertaForMultipleChoice''', '''RobertaForQuestionAnswering''', '''RobertaForSequenceClassification''', '''RobertaForTokenClassification''', '''RobertaModel''', '''RobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[Any] = [ '''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRobertaForCausalLM''', '''TFRobertaForMaskedLM''', '''TFRobertaForMultipleChoice''', '''TFRobertaForQuestionAnswering''', '''TFRobertaForSequenceClassification''', '''TFRobertaForTokenClassification''', '''TFRobertaMainLayer''', '''TFRobertaModel''', '''TFRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''FlaxRobertaForCausalLM''', '''FlaxRobertaForMaskedLM''', '''FlaxRobertaForMultipleChoice''', '''FlaxRobertaForQuestionAnswering''', '''FlaxRobertaForSequenceClassification''', '''FlaxRobertaForTokenClassification''', '''FlaxRobertaModel''', '''FlaxRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, FlaxRobertaPreTrainedModel, ) else: 
import sys snake_case__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
637
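The `_LazyModule` registration above defers importing the heavy torch/TF/Flax submodules until one of their names is first accessed. A simplified sketch of that mechanism (a toy illustration, not transformers' actual implementation):

import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Called only when normal lookup fails: import the owning submodule
        # on first access, then cache the attribute on the module itself.
        for submodule, names in self._import_structure.items():
            if attr in names:
                value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
                setattr(self, attr, value)
                return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")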
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): lowerCAmelCase : str = 3 lowerCAmelCase : Tuple = 2_5_0 lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) lowerCAmelCase : Union[str, Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Dict = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : str ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
637
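A minimal generation sketch wiring the criteria exercised above into `generate` (assuming the standard `transformers` API; `gpt2` is just a conveniently small checkpoint):

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)])
output = model.generate(**inputs, stopping_criteria=criteria)
print(tokenizer.decode(output[0]))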
1
"""simple docstring""" from __future__ import annotations class snake_case_: def __init__( self : Tuple , UpperCamelCase_ : int ): lowerCAmelCase : List[Any] = data lowerCAmelCase : Node | None = None lowerCAmelCase : Node | None = None def _snake_case ( _snake_case : Node | None ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def _snake_case ( _snake_case : Node | None ): return 1 + max(depth_of_tree(tree.left ) , depth_of_tree(tree.right ) ) if tree else 0 def _snake_case ( _snake_case : Node ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def _snake_case ( ): # Main function for testing. lowerCAmelCase : Optional[Any] = Node(1 ) lowerCAmelCase : str = Node(2 ) lowerCAmelCase : List[Any] = Node(3 ) lowerCAmelCase : Tuple = Node(4 ) lowerCAmelCase : str = Node(5 ) lowerCAmelCase : Optional[int] = Node(6 ) lowerCAmelCase : Tuple = Node(7 ) lowerCAmelCase : Any = Node(8 ) lowerCAmelCase : List[str] = Node(9 ) print(is_full_binary_tree(_snake_case ) ) print(depth_of_tree(_snake_case ) ) print('''Tree is: ''' ) display(_snake_case ) if __name__ == "__main__": main()
637
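A quick check of the property tested above: a tree is full only when every node has zero or two children, so a single lone child breaks it. This sketch assumes the snippet's `Node`/`is_full_binary_tree` names and the original `left`/`right` attributes:

lone = Node(1)
lone.left = Node(2)               # exactly one child
print(is_full_binary_tree(lone))  # -> False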
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = None def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case : List[Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowerCAmelCase : List[Any] = [] for i in range(_snake_case ): lowerCAmelCase : int = i / num_diffusion_timesteps lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) ) return torch.tensor(_snake_case , dtype=torch.floataa ) class snake_case_( a__ , a__ ): @register_to_config def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ ) lowerCAmelCase : str = 1.0 - self.betas lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) lowerCAmelCase : Tuple = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowerCAmelCase : Any = 1.0 # setable values lowerCAmelCase : Any = None lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() ) lowerCAmelCase : List[str] = variance_type def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ): return sample def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ): lowerCAmelCase : Any = num_inference_steps lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ): if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : int = self.alphas_cumprod[t] lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : Dict = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : Tuple = self.betas[t] else: lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add 
variance to pred_sample lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowerCAmelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) ) lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowerCAmelCase : Optional[Any] = variance.log() lowerCAmelCase : Union[str, Any] = beta.log() lowerCAmelCase : Dict = (predicted_variance + 1) / 2 lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 ) else: lowerCAmelCase : Optional[int] = None # 1. compute alphas, betas if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t] lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : int = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : List[Any] = self.betas[t] lowerCAmelCase : Optional[int] = self.alphas[t] else: lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev lowerCAmelCase : Dict = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowerCAmelCase : Tuple = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowerCAmelCase : Dict = torch.clamp( UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCAmelCase : int = 0 if t > 0: lowerCAmelCase : Union[str, Any] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device ) lowerCAmelCase : Any = self._get_variance( UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , ) if self.variance_type == "fixed_small_log": lowerCAmelCase : str = variance elif self.variance_type == "learned_range": lowerCAmelCase : Optional[Any] = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) lowerCAmelCase : List[Any] = variance * variance_noise lowerCAmelCase : int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowerCAmelCase : int = timesteps.to(original_samples.device ) lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase : str = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
637
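A small numeric sketch of the forward-noising rule that `add_noise` implements above, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps; any monotone beta schedule works for the illustration:

import torch

betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(2, 3)       # clean sample
eps = torch.randn_like(x0)   # Gaussian noise
t = 500
xt = alphas_cumprod[t].sqrt() * x0 + (1.0 - alphas_cumprod[t]).sqrt() * eps
print(xt.shape)              # torch.Size([2, 3])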
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging snake_case__ : str = logging.get_logger(__name__) snake_case__ : Union[str, Any] = { '''google/realm-cc-news-pretrained-embedder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json''' ), '''google/realm-cc-news-pretrained-encoder''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json''' ), '''google/realm-cc-news-pretrained-scorer''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json''' ), '''google/realm-cc-news-pretrained-openqa''': ( '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json''' ), '''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''', '''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''', '''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''', '''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''', # See all REALM models at https://huggingface.co/models?filter=realm } class snake_case_( a__ ): __UpperCamelCase = '''realm''' def __init__( self : Optional[int] , UpperCamelCase_ : Tuple=3_0_5_2_2 , UpperCamelCase_ : Tuple=7_6_8 , UpperCamelCase_ : List[str]=1_2_8 , UpperCamelCase_ : List[str]=1_2 , UpperCamelCase_ : Union[str, Any]=1_2 , UpperCamelCase_ : Dict=8 , UpperCamelCase_ : Dict=3_0_7_2 , UpperCamelCase_ : List[Any]="gelu_new" , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : List[str]=5_1_2 , UpperCamelCase_ : List[Any]=2 , UpperCamelCase_ : Dict=0.02 , UpperCamelCase_ : List[str]=1E-12 , UpperCamelCase_ : Any=2_5_6 , UpperCamelCase_ : Optional[int]=1_0 , UpperCamelCase_ : int=1E-3 , UpperCamelCase_ : str=5 , UpperCamelCase_ : Optional[Any]=3_2_0 , UpperCamelCase_ : Any=1_3_3_5_3_7_1_8 , UpperCamelCase_ : Optional[Any]=5_0_0_0 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=0 , UpperCamelCase_ : Optional[Any]=2 , **UpperCamelCase_ : Optional[Any] , ): super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) # Common config lowerCAmelCase : Dict = vocab_size lowerCAmelCase : Optional[Any] = max_position_embeddings lowerCAmelCase : Optional[int] = hidden_size lowerCAmelCase : Any = retriever_proj_size lowerCAmelCase : Any = num_hidden_layers lowerCAmelCase : Any = num_attention_heads lowerCAmelCase : Dict = num_candidates lowerCAmelCase : List[str] = intermediate_size lowerCAmelCase : Tuple = hidden_act lowerCAmelCase : str = hidden_dropout_prob lowerCAmelCase : List[str] = attention_probs_dropout_prob lowerCAmelCase : Dict = initializer_range lowerCAmelCase : Union[str, Any] = type_vocab_size lowerCAmelCase : Tuple = layer_norm_eps # Reader config lowerCAmelCase : Any = span_hidden_size lowerCAmelCase : Any = max_span_width lowerCAmelCase : Tuple = reader_layer_norm_eps lowerCAmelCase : List[Any] = reader_beam_size lowerCAmelCase : str = reader_seq_len # Retrieval config lowerCAmelCase : int = num_block_records lowerCAmelCase : Union[str, Any] = searcher_beam_size
637
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class snake_case_: def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ): lowerCAmelCase : Any = parent lowerCAmelCase : Any = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : str = is_training lowerCAmelCase : List[Any] = use_input_mask lowerCAmelCase : Optional[int] = use_token_type_ids lowerCAmelCase : Union[str, Any] = use_labels lowerCAmelCase : List[str] = vocab_size lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : int = num_hidden_layers lowerCAmelCase : Union[str, Any] = num_attention_heads lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : List[Any] = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : Tuple = attention_probs_dropout_prob lowerCAmelCase : Optional[Any] = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Tuple = type_sequence_label_size lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : str = num_labels lowerCAmelCase : Optional[int] = num_choices lowerCAmelCase : Tuple = scope def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : Tuple = None if self.use_input_mask: lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : List[str] = None if self.use_token_type_ids: lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : int = None lowerCAmelCase : int = None lowerCAmelCase : Tuple = None if self.use_labels: lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Tuple ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = True lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , ) lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ): lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : str = True lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # first forward pass lowerCAmelCase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 
) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] lowerCAmelCase : str = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] # select random slice lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Tuple = config_and_inputs lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , a__ , unittest.TestCase ): __UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = LlamaModelTester(self ) lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase : str = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : List[str] = 3 lowerCAmelCase : List[str] = input_dict['''input_ids'''] lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : 
int = '''single_label_classification''' lowerCAmelCase : Tuple = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : Dict = '''multi_label_classification''' lowerCAmelCase : Union[str, Any] = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def lowerCamelCase__ ( self : Optional[Any] ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size ) lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ ) original_model.to(UpperCamelCase_ ) original_model.eval() lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0} lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ ) scaled_model.to(UpperCamelCase_ ) scaled_model.eval() lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) @require_torch class snake_case_( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, 
-2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # fmt: off lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Model is curently gated''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' lowerCAmelCase : int = '''Simply put, the theory of relativity states that ''' lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ ) # greedy generation outputs lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
637
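The scaling test above builds a second model after setting a `{'type': scaling_type, 'factor': ...}` dictionary on the config; in the released library this is the `rope_scaling` field. A config-only sketch, assuming the standard `transformers` API:

from transformers import LlamaConfig

config = LlamaConfig(rope_scaling={"type": "dynamic", "factor": 2.0})
print(config.rope_scaling)  # RoPE embeddings stretch once inputs exceed max_position_embeddings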
1
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : list[float] , _snake_case : List[str] ): print(f'''Vertex\tShortest Distance from vertex {src}''' ) for i, d in enumerate(_snake_case ): print(f'''{i}\t\t{d}''' ) def _snake_case ( _snake_case : list[dict[str, int]] , _snake_case : list[float] , _snake_case : int ): for j in range(_snake_case ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[int] = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: return True return False def _snake_case ( _snake_case : list[dict[str, int]] , _snake_case : int , _snake_case : int , _snake_case : int ): lowerCAmelCase : Optional[int] = [float('''inf''' )] * vertex_count lowerCAmelCase : Tuple = 0.0 for _ in range(vertex_count - 1 ): for j in range(_snake_case ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : int = (graph[j][k] for k in ['''src''', '''dst''', '''weight''']) if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]: lowerCAmelCase : int = distance[u] + w lowerCAmelCase : Optional[Any] = check_negative_cycle(_snake_case , _snake_case , _snake_case ) if negative_cycle_exists: raise Exception('''Negative cycle found''' ) return distance if __name__ == "__main__": import doctest doctest.testmod() snake_case__ : Dict = int(input('''Enter number of vertices: ''').strip()) snake_case__ : Any = int(input('''Enter number of edges: ''').strip()) snake_case__ : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('''Edge ''', i + 1) snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = ( int(x) for x in input('''Enter source, destination, weight: ''').strip().split(''' ''') ) snake_case__ : Dict = {'''src''': src, '''dst''': dest, '''weight''': weight} snake_case__ : str = int(input('''\nEnter shortest path source:''').strip()) snake_case__ : Tuple = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
637
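A non-interactive sketch of the routine above on a hard-coded graph, using the `bellman_ford` entry-point name from the snippet's `__main__` block:

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 5},
    {"src": 1, "dst": 2, "weight": -3},
]
print(bellman_ford(edges, 3, len(edges), 0))  # -> [0.0, 4.0, 1.0] (0->1->2 beats 0->2)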
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ): lowerCAmelCase : Dict = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ): lowerCAmelCase : Optional[int] = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _snake_case ) lowerCAmelCase : List[Any] = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Any = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , ) for _ in range(1_0_0_0 ): lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class snake_case_( unittest.TestCase ): __UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None __UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __UpperCamelCase = 10 def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase : Optional[Any] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' ) class snake_case_: def __init__( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = fn def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ): return self.fn(*UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
637
1
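The warmup/decay ramp asserted above for get_linear_schedule_with_warmup can be reproduced standalone. A minimal sketch, assuming torch and transformers are installed; the numbers correspond to num_warmup_steps=2, num_training_steps=10 and a base learning rate of 10.0, as in the test:

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])  # same probe the unwrap helper uses
    scheduler.step()
print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]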
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available snake_case__ : Tuple = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Dict = ['''MaskFormerFeatureExtractor'''] snake_case__ : Union[str, Any] = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : List[Any] = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] snake_case__ : Dict = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys snake_case__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
637
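_LazyModule above defers the heavy torch/vision imports until a name is first accessed on the package. A minimal sketch of the idea; LazyModule here is an illustrative stand-in, not the transformers implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name back to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        # the submodule is imported only on first attribute access
        module = importlib.import_module("." + self._name_to_module[name], self.__name__)
        return getattr(module, name)

transformers installs such an object via sys.modules[__name__], so importing MaskFormerConfig stays cheap until MaskFormerModel (and therefore torch) is actually requested.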
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_( a__ ): __UpperCamelCase = '''philschmid/bart-large-cnn-samsum''' __UpperCamelCase = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) __UpperCamelCase = '''summarizer''' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = ['''text'''] __UpperCamelCase = ['''text'''] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ): return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ): return self.model.generate(**UpperCamelCase_ )[0] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ): return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
637
1
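At runtime the tool chains encode -> forward -> decode around the BART checkpoint. A usage sketch, assuming the transformers tools API (load_tool with the "summarization" task name) and network access to the Hub:

from transformers import load_tool

summarizer = load_tool("summarization")
text = (
    "Hugging Face Transformers provides thousands of pretrained models for text, "
    "vision, and audio tasks, sharing one API across PyTorch, TensorFlow, and JAX."
)
print(summarizer(text))  # a one- or two-sentence summary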
"""simple docstring""" import gc import threading import time import psutil import torch class snake_case_: def __init__( self : Optional[int] ): lowerCAmelCase : Tuple = psutil.Process() lowerCAmelCase : Tuple = False def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : str = -1 while True: lowerCAmelCase : Optional[Any] = max(self.process.memory_info().rss , self.cpu_memory_peak ) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Tuple = True lowerCAmelCase : Tuple = threading.Thread(target=self.peak_monitor ) lowerCAmelCase : Tuple = True self.thread.start() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[str] = False self.thread.join() return self.cpu_memory_peak snake_case__ : Optional[int] = PeakCPUMemory() def _snake_case ( ): # Time lowerCAmelCase : Dict = {'''time''': time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem lowerCAmelCase : Optional[int] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count() ): lowerCAmelCase : Optional[Any] = torch.cuda.memory_allocated(_snake_case ) torch.cuda.reset_peak_memory_stats() return measures def _snake_case ( _snake_case : Optional[Any] ): # Time lowerCAmelCase : Any = {'''time''': time.time() - start_measures['''time''']} gc.collect() torch.cuda.empty_cache() # CPU mem lowerCAmelCase : List[Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20 lowerCAmelCase : List[str] = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20 # GPU mem for i in range(torch.cuda.device_count() ): lowerCAmelCase : Any = (torch.cuda.memory_allocated(_snake_case ) - start_measures[str(_snake_case )]) / 2**20 lowerCAmelCase : Tuple = (torch.cuda.max_memory_allocated(_snake_case ) - start_measures[str(_snake_case )]) / 2**20 return measures def _snake_case ( _snake_case : Optional[Any] , _snake_case : int ): print(f'''{description}:''' ) print(f'''- Time: {measures["time"]:.2f}s''' ) for i in range(torch.cuda.device_count() ): print(f'''- GPU {i} allocated: {measures[str(_snake_case )]:.2f}MiB''' ) lowerCAmelCase : List[Any] = measures[f'''{i}-peak'''] print(f'''- GPU {i} peak: {peak:.2f}MiB''' ) print(f'''- CPU RAM allocated: {measures["cpu"]:.2f}MiB''' ) print(f'''- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB''' )
637
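A usage sketch for the measurement helpers above. Note they call torch.cuda APIs unconditionally, so a CUDA-capable setup is assumed; the workload in the middle is illustrative:

import torch

start = start_measure()
x = torch.randn(2048, 2048, device="cuda")
y = x @ x  # the workload being measured
torch.cuda.synchronize()  # make sure the kernel finished before reading the clocks
measures = end_measure(start)
log_measures(measures, "matmul benchmark")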
"""simple docstring""" snake_case__ : List[Any] = '''Tobias Carryer''' from time import time class snake_case_: def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008 lowerCAmelCase : str = multiplier lowerCAmelCase : Optional[int] = increment lowerCAmelCase : Optional[Any] = modulo lowerCAmelCase : Optional[Any] = seed def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31) while True: print(lcg.next_number())
637
1
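A quick deterministic run with the classic Numerical Recipes constants used above; the seed value 42 is illustrative (by default the class seeds from the wall clock):

lcg = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31, seed=42)
print([lcg.next_number() for _ in range(3)])
# [1083814273, 378494188, 2479403867] -- each value is
# (1664525 * seed + 1013904223) % 2**32, fed back in as the new seed.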
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case__ : Tuple = False class snake_case_( unittest.TestCase ): pass @nightly @require_torch_gpu class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Dict = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase : Dict = torch.manual_seed(0 ) lowerCAmelCase : Tuple = pipe.dual_guided( prompt='''first prompt''' , image=UpperCamelCase_ , text_to_image_strength=0.75 , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained(UpperCamelCase_ , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : int = generator.manual_seed(0 ) lowerCAmelCase : Optional[int] = pipe.dual_guided( prompt='''first prompt''' , image=UpperCamelCase_ , text_to_image_strength=0.75 , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = '''cyberpunk 2077''' lowerCAmelCase : str = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) lowerCAmelCase : List[Any] = torch.manual_seed(0 ) lowerCAmelCase : Union[str, Any] = pipe.dual_guided( prompt=UpperCamelCase_ , image=UpperCamelCase_ , text_to_image_strength=0.75 , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images lowerCAmelCase : List[str] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase : Dict = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCAmelCase : str = '''A painting of a squirrel eating a burger ''' lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) lowerCAmelCase : Dict = pipe.text_to_image( prompt=UpperCamelCase_ , generator=UpperCamelCase_ , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images lowerCAmelCase : int = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase : Optional[Any] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 lowerCAmelCase : Dict = 
pipe.image_variation(UpperCamelCase_ , generator=UpperCamelCase_ , output_type='''numpy''' ).images lowerCAmelCase : List[Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) lowerCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
637
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: snake_case__ : Optional[Any] = None snake_case__ : Union[str, Any] = logging.get_logger(__name__) snake_case__ : List[str] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''} snake_case__ : Any = { '''vocab_file''': { '''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''', '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model''' ), }, '''tokenizer_file''': { '''google/bigbird-roberta-base''': ( '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json''' ), '''google/bigbird-roberta-large''': ( '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json''' ), '''google/bigbird-base-trivia-itc''': ( '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json''' ), }, } snake_case__ : int = { '''google/bigbird-roberta-base''': 4_096, '''google/bigbird-roberta-large''': 4_096, '''google/bigbird-base-trivia-itc''': 4_096, } snake_case__ : Optional[Any] = '''▁''' class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = BigBirdTokenizer __UpperCamelCase = ['''input_ids''', '''attention_mask'''] __UpperCamelCase = [] def __init__( self : Union[str, Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Any=None , UpperCamelCase_ : str="<unk>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="[SEP]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Any="[CLS]" , **UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase : Optional[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , **UpperCamelCase_ , ) lowerCAmelCase : Optional[int] = vocab_file lowerCAmelCase : Optional[int] = False if not self.vocab_file else True def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : str = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Tuple = [self.sep_token_id] lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
637
1
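The post-processing built above wraps a single sequence as [CLS] ... [SEP] (and a pair as [CLS] A [SEP] B [SEP]). A sketch, assuming network access to the Hub checkpoint:

from transformers import BigBirdTokenizerFast

tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
ids = tok("Hello world")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # e.g. ['[CLS]', '▁Hello', '▁world', '[SEP]']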
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin snake_case__ : Tuple = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right snake_case__ : Dict = 250_004 snake_case__ : List[Any] = 250_020 @require_sentencepiece @require_tokenizers class snake_case_( a__ , unittest.TestCase ): __UpperCamelCase = MBartTokenizer __UpperCamelCase = MBartTokenizerFast __UpperCamelCase = True __UpperCamelCase = True def lowerCamelCase__ ( self : int ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase : Tuple = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : int = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) lowerCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def lowerCamelCase__ ( self : int ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase : Optional[int] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : str = tempfile.mkdtemp() lowerCAmelCase : Optional[int] = 
tokenizer_r.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) lowerCAmelCase : Optional[Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f ) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase : List[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=True lowerCAmelCase : Any = tempfile.mkdtemp() lowerCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way lowerCAmelCase : str = tokenizer_r.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Dict = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=False lowerCAmelCase : Optional[Any] = tempfile.mkdtemp() lowerCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) lowerCAmelCase : List[Any] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class snake_case_( unittest.TestCase ): __UpperCamelCase = '''facebook/mbart-large-en-ro''' __UpperCamelCase = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] __UpperCamelCase = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] __UpperCamelCase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 
51_712, 2, EN_CODE] @classmethod def lowerCamelCase__ ( cls : Optional[Any] ): lowerCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' ) lowerCAmelCase : List[Any] = 1 return cls def lowerCamelCase__ ( self : Union[str, Any] ): self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids ) lowerCAmelCase : Union[str, Any] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2] lowerCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) lowerCAmelCase : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : List[Any] = ['''this is gunna be a long sentence ''' * 2_0] assert isinstance(src_text[0] , UpperCamelCase_ ) lowerCAmelCase : Dict = 1_0 lowerCAmelCase : List[str] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCamelCase_ ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = tempfile.mkdtemp() lowerCAmelCase : Optional[int] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = MBartTokenizer.from_pretrained(UpperCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ ) @require_torch def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase : str = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) lowerCAmelCase : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 1_4) , batch.input_ids.shape ) self.assertEqual((2, 1_4) , batch.attention_mask.shape ) lowerCAmelCase : List[Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ ) self.assertEqual(2 , 
batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : str = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' ) lowerCAmelCase : Union[str, Any] = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=1_0 , return_tensors='''pt''' ) lowerCAmelCase : str = targets['''input_ids'''] lowerCAmelCase : int = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , { # A, test, EOS, en_XX '''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 2_5_0_0_0_1, } , )
637
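The source-side layout asserted above ([..., </s>, en_XX]) can be observed directly. A sketch, assuming network access to the facebook/mbart-large-en-ro checkpoint:

from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
print(batch["input_ids"][0][-2:].tolist())  # [2, 250004] -> [</s>, en_XX]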
"""simple docstring""" # using dfs for finding eulerian path traversal def _snake_case ( _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : str , _snake_case : List[Any]=None ): lowerCAmelCase : Any = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = True, True lowerCAmelCase : int = dfs(_snake_case , _snake_case , _snake_case , _snake_case ) return path def _snake_case ( _snake_case : Optional[int] , _snake_case : Dict ): lowerCAmelCase : Tuple = 0 lowerCAmelCase : Optional[Any] = -1 for i in range(_snake_case ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 lowerCAmelCase : Optional[Any] = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def _snake_case ( _snake_case : Tuple , _snake_case : List[Any] ): lowerCAmelCase : Any = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] lowerCAmelCase, lowerCAmelCase : Optional[int] = check_circuit_or_path(_snake_case , _snake_case ) if check == 3: print('''graph is not Eulerian''' ) print('''no path''' ) return lowerCAmelCase : Dict = 1 if check == 2: lowerCAmelCase : int = odd_node print('''graph has a Euler path''' ) if check == 1: print('''graph has a Euler cycle''' ) lowerCAmelCase : List[str] = dfs(_snake_case , _snake_case , _snake_case ) print(_snake_case ) def _snake_case ( ): lowerCAmelCase : Optional[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} lowerCAmelCase : Union[str, Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} lowerCAmelCase : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} lowerCAmelCase : Optional[Any] = {1: [2, 3], 2: [1, 3], 3: [1, 2]} lowerCAmelCase : Any = { 1: [], 2: [] # all degree is zero } lowerCAmelCase : List[str] = 10 check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) check_euler(_snake_case , _snake_case ) if __name__ == "__main__": main()
637
1
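One more small check against the helpers above. The graph below (an illustrative assumption, not one of the five in main()) has exactly two odd-degree vertices, 1 and 4, so it admits an Euler path but not a cycle, and the traversal starts at an odd vertex:

g = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1]}
check_euler(g, 10)
# graph has a Euler path
# [4, 1, 2, 3, 1]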
"""simple docstring""" # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path snake_case__ : str = Path(__file__).resolve().parents[3] / '''src''' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) snake_case__ : Union[str, Any] = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''} snake_case__ : str = '''zero2''' snake_case__ : Optional[Any] = '''zero3''' snake_case__ : Optional[int] = [ZEROa, ZEROa] def _snake_case ( _snake_case : Optional[Any] , _snake_case : int , _snake_case : str ): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param lowerCAmelCase : Union[str, Any] = parameterized.to_safe_name('''_'''.join(str(_snake_case ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test snake_case__ : str = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class snake_case_( a__ ): @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) @require_torch_multi_gpu @parameterized.expand(UpperCamelCase_ , name_func=UpperCamelCase_ ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] ): self.run_and_check( stage=UpperCamelCase_ , model=UpperCamelCase_ , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Tuple = models[model] lowerCAmelCase : Any = self.run_trainer( stage=UpperCamelCase_ , 
model_name=UpperCamelCase_ , eval_steps=UpperCamelCase_ , num_train_epochs=1 , distributed=UpperCamelCase_ , fpaa=UpperCamelCase_ , ) self.do_checks(UpperCamelCase_ ) return output_dir def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int = 1_0 , UpperCamelCase_ : int = 1 , UpperCamelCase_ : bool = True , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Any = self.get_auto_remove_tmp_dir('''./xxx''' , after=UpperCamelCase_ ) lowerCAmelCase : Tuple = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(UpperCamelCase_ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files lowerCAmelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() lowerCAmelCase : Dict = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] lowerCAmelCase : Optional[int] = self.get_launcher(UpperCamelCase_ ) lowerCAmelCase : Tuple = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(UpperCamelCase_ , env=self.get_env() ) return output_dir def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Optional[int]=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) lowerCAmelCase : Union[str, Any] = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
637
"""simple docstring""" import os import shutil import sys import tempfile import unittest from pathlib import Path import pytest import transformers from transformers import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoTokenizer, BertConfig, BertTokenizer, BertTokenizerFast, CTRLTokenizer, GPTaTokenizer, GPTaTokenizerFast, PreTrainedTokenizerFast, RobertaTokenizer, RobertaTokenizerFast, is_tokenizers_available, ) from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.auto.tokenization_auto import ( TOKENIZER_MAPPING, get_tokenizer_config, tokenizer_class_from_name, ) from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import ( DUMMY_DIFF_TOKENIZER_IDENTIFIER, DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tokenizers, slow, ) sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : Optional[Any] = 0 @slow def lowerCamelCase__ ( self : Dict ): for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys(): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (GPTaTokenizer, GPTaTokenizerFast) ) self.assertGreater(len(UpperCamelCase_ ) , 0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (RobertaTokenizer, RobertaTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 2_0 ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) # Check that tokenizer_type ≠ model_type lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(tokenizer.vocab_size , 1_2 ) def lowerCamelCase__ ( self : Any ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Any = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , 
'''merges.txt''' ) ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Union[str, Any] ): with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(UpperCamelCase_ , '''vocab.txt''' ) ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''bert''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(UpperCamelCase_ , '''vocab.json''' ) ) shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(UpperCamelCase_ , '''merges.txt''' ) ) lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ , tokenizer_type='''gpt2''' ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): with pytest.raises(UpperCamelCase_ ): AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' ) @require_tokenizers def lowerCamelCase__ ( self : str ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: lowerCAmelCase : Dict = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , UpperCamelCase_ ) else: self.assertEqual(tokenizer.do_lower_case , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]: with self.assertRaisesRegex( UpperCamelCase_ , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ): lowerCAmelCase : Any = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' ) def lowerCamelCase__ ( self : Tuple ): # tests: https://github.com/huggingface/transformers/pull/13251 # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai lowerCAmelCase : Optional[Any] = TOKENIZER_MAPPING.values() lowerCAmelCase : Optional[Any] = [] for slow_tok, fast_tok in tokenizers: if slow_tok is not None: tokenizer_names.append(slow_tok.__name__ ) if fast_tok is not None: tokenizer_names.append(fast_tok.__name__ ) for tokenizer_name in tokenizer_names: # must find the right class tokenizer_class_from_name(UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Any ): self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=UpperCamelCase_ ) , UpperCamelCase_ ) self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , UpperCamelCase_ ) @require_tokenizers def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = '''Hello, world. 
How are you?''' lowerCAmelCase : Optional[Any] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = tokenizer.tokenize(UpperCamelCase_ ) self.assertEqual('''[UNK]''' , tokens[0] ) @require_tokenizers def lowerCamelCase__ ( self : int ): lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' ) self.assertEqual(type(UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(tokenizer.model_max_length , 5_1_2 ) self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 ) self.assertEqual(tokenizer.unk_token , '''[UNK]''' ) self.assertEqual(tokenizer.padding_side , '''right''' ) self.assertEqual(tokenizer.truncation_side , '''right''' ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , (BertTokenizer, BertTokenizerFast) ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , tokenizer.__class__ ) self.assertEqual(tokenizera.vocab_size , 1_2 ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' ) # There is no fast CTRL so this always gives us a slow tokenizer. self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): # Check we can load the tokenizer config of an online model. lowerCAmelCase : Any = get_tokenizer_config('''bert-base-cased''' ) lowerCAmelCase : Optional[int] = config.pop('''_commit_hash''' , UpperCamelCase_ ) # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated. self.assertEqual(UpperCamelCase_ , {'''do_lower_case''': False} ) # This model does not have a tokenizer_config so we get back an empty dict. lowerCAmelCase : Union[str, Any] = get_tokenizer_config(UpperCamelCase_ ) self.assertDictEqual(UpperCamelCase_ , {} ) # A tokenizer saved with `save_pretrained` always creates a tokenizer config. lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Dict = get_tokenizer_config(UpperCamelCase_ ) # Check the class of the tokenizer was properly saved (note that it always saves the slow class). 
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' ) def lowerCamelCase__ ( self : Optional[int] ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = CustomTokenizer.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] @require_tokenizers def lowerCamelCase__ ( self : str ): try: AutoConfig.register('''custom''' , UpperCamelCase_ ) # Can register in two steps AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) del TOKENIZER_MAPPING._extra_content[CustomConfig] # Can register in one step AutoTokenizer.register( UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase_ ): AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer # and that model does not have a tokenizer.json with tempfile.TemporaryDirectory() as tmp_dir: lowerCAmelCase : Dict = BertTokenizerFast.from_pretrained(UpperCamelCase_ ) bert_tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : int = CustomTokenizerFast.from_pretrained(UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Optional[int] ): # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(UpperCamelCase_ ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) # Test tokenizer can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: tokenizer.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(UpperCamelCase_ , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(reloaded_tokenizer.special_attribute_present ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' ) @require_tokenizers def lowerCamelCase__ ( self : Optional[int] ): class snake_case_( a__ ): __UpperCamelCase = False class snake_case_( a__ ): __UpperCamelCase = NewTokenizer __UpperCamelCase = False try: AutoConfig.register('''custom''' , UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , slow_tokenizer_class=UpperCamelCase_ ) AutoTokenizer.register(UpperCamelCase_ , fast_tokenizer_class=UpperCamelCase_ ) # If remote code is not set, the default is to use local lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : str = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote code is disabled, we load the local one. 
lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertFalse(tokenizer.special_attribute_present ) lowerCAmelCase : Dict = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertFalse(tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) self.assertTrue(tokenizer.special_attribute_present ) lowerCAmelCase : int = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) self.assertTrue(tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : str = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' ) # Test we can also load the slow version lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained( '''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=UpperCamelCase_ , use_fast=UpperCamelCase_ ) self.assertTrue(tokenizer.special_attribute_present ) self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' ) def lowerCamelCase__ ( self : str ): with self.assertRaisesRegex( UpperCamelCase_ , '''bert-base is not a local folder and is not a valid model identifier''' ): lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base''' ) def lowerCamelCase__ ( self : int ): with self.assertRaisesRegex( UpperCamelCase_ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): lowerCAmelCase : List[Any] = AutoTokenizer.from_pretrained(UpperCamelCase_ , revision='''aaaaaa''' ) def lowerCamelCase__ ( self : Optional[int] ): # Make sure we have cached the tokenizer. lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: lowerCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
"""simple docstring""" from maths.prime_factors import prime_factors def _snake_case ( _snake_case : int ): if not isinstance(_snake_case , _snake_case ): lowerCAmelCase : Optional[int] = f'''Input value of [number={number}] must be an integer''' raise TypeError(_snake_case ) if number < 1: raise ValueError('''Input must be a positive integer''' ) return -1 if len(prime_factors(_snake_case ) ) % 2 else 1 if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case__ : Optional[Any] = logging.get_logger(__name__) snake_case__ : Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''} # See all LED models at https://huggingface.co/models?filter=LED snake_case__ : Optional[Any] = { '''vocab_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''', }, '''merges_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''', }, } snake_case__ : List[Any] = { '''allenai/led-base-16384''': 16_384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _snake_case ( ): lowerCAmelCase : Optional[int] = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowerCAmelCase : str = bs[:] lowerCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(_snake_case ) cs.append(2**8 + n ) n += 1 lowerCAmelCase : int = [chr(_snake_case ) for n in cs] return dict(zip(_snake_case , _snake_case ) ) def _snake_case ( _snake_case : List[Any] ): lowerCAmelCase : List[str] = set() lowerCAmelCase : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase : Optional[Any] = char return pairs class snake_case_( a__ ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple="replace" , UpperCamelCase_ : Union[str, Any]="<s>" , UpperCamelCase_ : List[str]="</s>" , UpperCamelCase_ : str="</s>" , UpperCamelCase_ : int="<s>" , UpperCamelCase_ : int="<unk>" , UpperCamelCase_ : Union[str, Any]="<pad>" , UpperCamelCase_ : Tuple="<mask>" , UpperCamelCase_ : Optional[int]=False , **UpperCamelCase_ : Tuple , ): lowerCAmelCase : Any = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else bos_token lowerCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token lowerCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else sep_token lowerCAmelCase : int = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else cls_token lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token lowerCAmelCase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token # Mask token behave like a 
normal word, i.e. include the space before it lowerCAmelCase : Tuple = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token super().__init__( errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: lowerCAmelCase : Any = json.load(UpperCamelCase_ ) lowerCAmelCase : Dict = {v: k for k, v in self.encoder.items()} lowerCAmelCase : Optional[int] = errors # how to handle errors in decoding lowerCAmelCase : List[Any] = bytes_to_unicode() lowerCAmelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase : Optional[int] = merges_handle.read().split('''\n''' )[1:-1] lowerCAmelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) lowerCAmelCase : List[Any] = {} lowerCAmelCase : Optional[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase : Dict = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase__ ( self : Union[str, Any] ): return len(self.encoder ) def lowerCamelCase__ ( self : Union[str, Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int ): if token in self.cache: return self.cache[token] lowerCAmelCase : List[str] = tuple(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: lowerCAmelCase : List[Any] = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase, lowerCAmelCase : Any = bigram lowerCAmelCase : Tuple = [] lowerCAmelCase : Any = 0 while i < len(UpperCamelCase_ ): try: lowerCAmelCase : int = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase : int = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase : Tuple = tuple(UpperCamelCase_ ) lowerCAmelCase : Tuple = new_word if len(UpperCamelCase_ ) == 1: break else: lowerCAmelCase : Optional[Any] = get_pairs(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = ''' '''.join(UpperCamelCase_ ) lowerCAmelCase : List[str] = word return word def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Dict = [] for token in re.findall(self.pat , UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase_ ).split(''' ''' ) ) return bpe_tokens def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ): return 
self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Union[str, Any] ): return self.decoder.get(UpperCamelCase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = ''''''.join(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : int = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) lowerCAmelCase : Optional[int] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCAmelCase : Tuple = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase : Any = [self.cls_token_id] lowerCAmelCase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Optional[Any] = [self.sep_token_id] lowerCAmelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict=False , **UpperCamelCase_ : Tuple ): lowerCAmelCase : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase_ ) > 0 and not text[0].isspace()): lowerCAmelCase : List[Any] = ''' ''' + text return (text, kwargs) def lowerCamelCase__ ( self : str , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ 
: Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ): lowerCAmelCase : Dict = super()._pad( encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ) # Load from model defaults if return_attention_mask is None: lowerCAmelCase : Tuple = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCAmelCase : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. lowerCAmelCase : List[Any] = len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCamelCase_ ) if needs_to_be_padded: lowerCAmelCase : int = len(UpperCamelCase_ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCAmelCase : Dict = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": lowerCAmelCase : int = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
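# --- Hedged usage sketch (illustrative; not part of the original file) ---
# The _pad override above extends a user-supplied global_attention_mask with
# -1 entries alongside the regular padding, since 0 in that mask already
# means "local attention" rather than "do not attend". A sketch, assuming
# the public allenai/led-base-16384 checkpoint:

from transformers import LEDTokenizer

led_tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
encoded = led_tokenizer("Summarize this document.")
# Let the first token attend globally, the rest locally.
encoded["global_attention_mask"] = [1] + [0] * (len(encoded["input_ids"]) - 1)
padded = led_tokenizer.pad(encoded, padding="max_length", max_length=16)
# padded["global_attention_mask"] now ends in -1 for the padded positions.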
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : List[Any] = '''platform''' import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def _snake_case ( _snake_case : List[Any] , _snake_case : str , _snake_case : Optional[Any]=None , _snake_case : List[str]=None , _snake_case : List[Any]=None , _snake_case : Optional[Any]=None , _snake_case : Union[str, Any]=None , _snake_case : Optional[Any]=None , ): if attention_mask is None: lowerCAmelCase : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase : str = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase : Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase : Optional[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase : Dict = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class snake_case_: def __init__( self : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any]=1_3 , UpperCamelCase_ : Optional[int]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=False , UpperCamelCase_ : Optional[Any]=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : Optional[Any]=2 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : List[str]="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[Any]=3_2 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : str=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Optional[Any]=0.02 , ): lowerCAmelCase : Dict = parent lowerCAmelCase : Optional[Any] = batch_size lowerCAmelCase : Union[str, Any] = seq_length lowerCAmelCase : Any = is_training lowerCAmelCase : Optional[Any] = use_labels lowerCAmelCase : Any = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : List[str] = num_hidden_layers lowerCAmelCase : List[Any] = num_attention_heads lowerCAmelCase : Any = intermediate_size lowerCAmelCase : List[str] = hidden_act lowerCAmelCase : Optional[int] = hidden_dropout_prob lowerCAmelCase : str = attention_probs_dropout_prob lowerCAmelCase : Any = max_position_embeddings lowerCAmelCase : Dict = eos_token_id lowerCAmelCase : Tuple = pad_token_id lowerCAmelCase : Optional[int] = bos_token_id lowerCAmelCase : Union[str, Any] = initializer_range def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) 
lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) lowerCAmelCase : Optional[int] = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Tuple = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Any = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def lowerCamelCase__ ( self : int ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Any , UpperCamelCase_ : Any ): lowerCAmelCase : str = 2_0 lowerCAmelCase : List[str] = model_class_name(UpperCamelCase_ ) lowerCAmelCase : str = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : List[str] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : Dict = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Dict = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCAmelCase : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : str = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : Optional[Any] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Tuple = model.decode(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : Any = 2_0 lowerCAmelCase : Union[str, Any] = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : List[Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , 
(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : List[str] = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ ) lowerCAmelCase : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class snake_case_( unittest.TestCase ): __UpperCamelCase = 99 def lowerCamelCase__ ( self : str ): lowerCAmelCase : Tuple = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase : str = input_ids.shape[0] lowerCAmelCase : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCamelCase__ ( self : Any ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Dict = self._get_config_and_data() lowerCAmelCase : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : int = lm_model(input_ids=UpperCamelCase_ ) lowerCAmelCase : int = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) lowerCAmelCase : List[str] = FlaxBlenderbotSmallForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : List[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) lowerCAmelCase : Dict = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase : str = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ) lowerCAmelCase : List[str] = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : List[str] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : int = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() lowerCAmelCase : Tuple = 
np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(UpperCamelCase_ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class snake_case_( a__ , unittest.TestCase , a__ ): __UpperCamelCase = True __UpperCamelCase = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : str = FlaxBlenderbotSmallModelTester(self ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict ): lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) @jax.jit def encode_jitted(UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : Optional[Any] ): return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : int ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : List[str] = model_class(UpperCamelCase_ ) lowerCAmelCase : Dict = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCAmelCase : Any = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any ): return model.decode( decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : Optional[Any] = decode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : List[Any] = decode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, 
Any] ): for model_class_name in self.all_model_classes: lowerCAmelCase : Any = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase : Dict = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase : Tuple = model(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ )
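# --- Hedged sketch (illustrative) of the cached-decoding pattern the tests
# above check: init_cache pre-allocates key/value states, one decode call
# fills them for all but the last token, and the final call feeds a single
# token with an explicit position id.

import jax.numpy as jnp


def incremental_decode(model, encoder_outputs, decoder_input_ids, max_decoder_length):
    batch = decoder_input_ids.shape[0]
    cache = model.init_cache(batch, max_decoder_length, encoder_outputs)
    prefix_positions = jnp.broadcast_to(
        jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
        (batch, decoder_input_ids.shape[-1] - 1),
    )
    # Fill the cache with all but the last token.
    prefix = model.decode(
        decoder_input_ids[:, :-1],
        encoder_outputs,
        past_key_values=cache,
        decoder_position_ids=prefix_positions,
    )
    # Decode the final token against the populated cache.
    last_position = jnp.array(batch * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
    return model.decode(
        decoder_input_ids[:, -1:],
        encoder_outputs,
        past_key_values=prefix.past_key_values,
        decoder_position_ids=last_position,
    )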
"""simple docstring""" def _snake_case ( _snake_case : int = 4000000 ): lowerCAmelCase : int = [0, 1] lowerCAmelCase : List[str] = 0 while fib[i] <= n: fib.append(fib[i] + fib[i + 1] ) if fib[i + 2] > n: break i += 1 lowerCAmelCase : int = 0 for j in range(len(_snake_case ) - 1 ): if fib[j] % 2 == 0: total += fib[j] return total if __name__ == "__main__": print(f"""{solution() = }""")
"""simple docstring""" def _snake_case ( _snake_case : list[list[float]] ): lowerCAmelCase : list[list[float]] = [] for data in source_data: for i, el in enumerate(_snake_case ): if len(_snake_case ) < i + 1: data_lists.append([] ) data_lists[i].append(float(_snake_case ) ) return data_lists def _snake_case ( _snake_case : list[list[float]] , _snake_case : list[int] ): lowerCAmelCase : list[list[float]] = [] for dlist, weight in zip(_snake_case , _snake_case ): lowerCAmelCase : Tuple = min(_snake_case ) lowerCAmelCase : Optional[int] = max(_snake_case ) lowerCAmelCase : list[float] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: lowerCAmelCase : Optional[int] = f'''Invalid weight of {weight:f} provided''' raise ValueError(_snake_case ) score_lists.append(_snake_case ) return score_lists def _snake_case ( _snake_case : list[list[float]] ): lowerCAmelCase : list[float] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(_snake_case ): lowerCAmelCase : Optional[Any] = final_scores[j] + ele return final_scores def _snake_case ( _snake_case : list[list[float]] , _snake_case : list[int] ): lowerCAmelCase : int = get_data(_snake_case ) lowerCAmelCase : List[str] = calculate_each_score(_snake_case , _snake_case ) lowerCAmelCase : Dict = generate_final_scores(_snake_case ) # append scores to source data for i, ele in enumerate(_snake_case ): source_data[i].append(_snake_case ) return source_data
"""simple docstring""" def _snake_case ( _snake_case : float , _snake_case : list[float] ): if discount_rate < 0: raise ValueError('''Discount rate cannot be negative''' ) if not cash_flows: raise ValueError('''Cash flows list cannot be empty''' ) lowerCAmelCase : List[str] = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) ) return round(_snake_case , ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ): return base * power(_snake_case , (exponent - 1) ) if exponent else 1 if __name__ == "__main__": print('''Raise base to the power of exponent using recursion...''') snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip()) snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip()) snake_case__ : Any = power(base, abs(exponent)) if exponent < 0: # power() does not properly deal w/ negative exponents snake_case__ : Dict = 1 / result print(f"""{base} to the power of {exponent} is {result}""")
"""simple docstring""" from __future__ import annotations def _snake_case ( _snake_case : list[int] , _snake_case : int ): if len(_snake_case ) == 0: return False lowerCAmelCase : List[Any] = len(_snake_case ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , _snake_case ) else: return binary_search(a_list[midpoint + 1 :] , _snake_case ) if __name__ == "__main__": snake_case__ : List[str] = input('''Enter numbers separated by comma:\n''').strip() snake_case__ : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')] snake_case__ : Dict = int(input('''Enter the number to be found in the list:\n''').strip()) snake_case__ : str = '''''' if binary_search(sequence, target) else '''not ''' print(f"""{target} was {not_str}found in {sequence}""")
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING snake_case__ : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(a__ ) class snake_case_( a__ ): def __init__( self : str , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Dict ): super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) self.check_model_type(UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : str=None , UpperCamelCase_ : List[str]=None , UpperCamelCase_ : Tuple=None , **UpperCamelCase_ : Any ): lowerCAmelCase, lowerCAmelCase : Tuple = {}, {} if padding is not None: lowerCAmelCase : Optional[int] = padding if truncation is not None: lowerCAmelCase : Optional[Any] = truncation if top_k is not None: lowerCAmelCase : Union[str, Any] = top_k return preprocess_params, {}, postprocess_params def __call__( self : Optional[int] , UpperCamelCase_ : Union["Image.Image", str] , UpperCamelCase_ : str = None , **UpperCamelCase_ : Any ): if isinstance(UpperCamelCase_ , (Image.Image, str) ) and isinstance(UpperCamelCase_ , UpperCamelCase_ ): lowerCAmelCase : Union[str, Any] = {'''image''': image, '''question''': question} else: lowerCAmelCase : Union[str, Any] = image lowerCAmelCase : List[Any] = super().__call__(UpperCamelCase_ , **UpperCamelCase_ ) return results def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str]=False , UpperCamelCase_ : str=False ): lowerCAmelCase : Optional[int] = load_image(inputs['''image'''] ) lowerCAmelCase : Any = self.tokenizer( inputs['''question'''] , return_tensors=self.framework , padding=UpperCamelCase_ , truncation=UpperCamelCase_ ) lowerCAmelCase : int = self.image_processor(images=UpperCamelCase_ , return_tensors=self.framework ) model_inputs.update(UpperCamelCase_ ) return model_inputs def lowerCamelCase__ ( self : str , UpperCamelCase_ : Tuple ): lowerCAmelCase : int = self.model(**UpperCamelCase_ ) return model_outputs def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : int=5 ): if top_k > self.model.config.num_labels: lowerCAmelCase : List[Any] = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase : Optional[int] = model_outputs.logits.sigmoid()[0] lowerCAmelCase, lowerCAmelCase : List[Any] = probs.topk(UpperCamelCase_ ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) lowerCAmelCase : Union[str, Any] = scores.tolist() lowerCAmelCase : Optional[int] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict snake_case__ : Optional[Any] = namedtuple( '''_TestCommandArgs''', [ '''dataset''', '''name''', '''cache_dir''', '''data_dir''', '''all_configs''', '''save_infos''', '''ignore_verifications''', '''force_redownload''', '''clear_cache''', ], defaults=[None, None, None, False, False, False, False, False], ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[str] ): return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def _snake_case ( _snake_case : Any ): lowerCAmelCase : Union[str, Any] = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case ) lowerCAmelCase : str = TestCommand(*_snake_case ) test_command.run() lowerCAmelCase : str = os.path.join(_snake_case , '''README.md''' ) assert os.path.exists(_snake_case ) lowerCAmelCase : Tuple = DatasetInfosDict.from_directory(_snake_case ) lowerCAmelCase : List[str] = DatasetInfosDict( { '''default''': DatasetInfo( features=Features( { '''tokens''': Sequence(Value('''string''' ) ), '''ner_tags''': Sequence( ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ), '''langs''': Sequence(Value('''string''' ) ), '''spans''': Sequence(Value('''string''' ) ), } ) , splits=[ { '''name''': '''train''', '''num_bytes''': 2351563, '''num_examples''': 10000, }, { '''name''': '''validation''', '''num_bytes''': 238418, '''num_examples''': 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: lowerCAmelCase, lowerCAmelCase : Union[str, Any] = getattr(dataset_infos['''default'''] , _snake_case ), getattr(expected_dataset_infos['''default'''] , _snake_case ) if key == "num_bytes": assert is_apercent_close(_snake_case , _snake_case ) elif key == "splits": assert list(_snake_case ) == list(_snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
"""simple docstring""" import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class snake_case_( a__ , a__ ): @register_to_config def __init__( self : str , UpperCamelCase_ : int = 1_2_8 , UpperCamelCase_ : int = 2_5_6 , UpperCamelCase_ : float = 2_000.0 , UpperCamelCase_ : int = 7_6_8 , UpperCamelCase_ : int = 1_2 , UpperCamelCase_ : int = 1_2 , UpperCamelCase_ : int = 6_4 , UpperCamelCase_ : int = 2_0_4_8 , UpperCamelCase_ : float = 0.1 , ): super().__init__() lowerCAmelCase : Optional[int] = nn.Sequential( nn.Linear(UpperCamelCase_ , d_model * 4 , bias=UpperCamelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase_ ) , nn.SiLU() , ) lowerCAmelCase : Dict = nn.Embedding(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = False lowerCAmelCase : Optional[Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = nn.Dropout(p=UpperCamelCase_ ) lowerCAmelCase : int = nn.ModuleList() for lyr_num in range(UpperCamelCase_ ): # FiLM conditional T5 decoder lowerCAmelCase : Tuple = DecoderLayer(d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ ) self.decoders.append(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = TaLayerNorm(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = nn.Dropout(p=UpperCamelCase_ ) lowerCAmelCase : int = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[str] ): lowerCAmelCase : Union[str, Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Tuple = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. lowerCAmelCase : Union[str, Any] = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype ) lowerCAmelCase : str = self.conditioning_emb(UpperCamelCase_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) lowerCAmelCase : Tuple = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. lowerCAmelCase : int = torch.broadcast_to( torch.arange(UpperCamelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , ) lowerCAmelCase : Any = self.position_encoding(UpperCamelCase_ ) lowerCAmelCase : Tuple = self.continuous_inputs_projection(UpperCamelCase_ ) inputs += position_encodings lowerCAmelCase : Tuple = self.dropout(UpperCamelCase_ ) # decoder: No padding present. lowerCAmelCase : Optional[Any] = torch.ones( decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
lowerCAmelCase : int = [(x, self.encoder_decoder_mask(UpperCamelCase_ , UpperCamelCase_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings lowerCAmelCase : Any = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 ) lowerCAmelCase : Optional[int] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 ) for lyr in self.decoders: lowerCAmelCase : Optional[int] = lyr( UpperCamelCase_ , conditioning_emb=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )[0] lowerCAmelCase : List[str] = self.decoder_norm(UpperCamelCase_ ) lowerCAmelCase : Any = self.post_dropout(UpperCamelCase_ ) lowerCAmelCase : int = self.spec_out(UpperCamelCase_ ) return spec_out class snake_case_( nn.Module ): def __init__( self : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Any=1E-6 ): super().__init__() lowerCAmelCase : Union[str, Any] = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , dropout_rate=UpperCamelCase_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=UpperCamelCase_ , d_kv=UpperCamelCase_ , num_heads=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ , ) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ , layer_norm_epsilon=UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Any=None , ): lowerCAmelCase : Optional[int] = self.layer[0]( UpperCamelCase_ , conditioning_emb=UpperCamelCase_ , attention_mask=UpperCamelCase_ , ) if encoder_hidden_states is not None: lowerCAmelCase : List[str] = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to( encoder_hidden_states.dtype ) lowerCAmelCase : Optional[int] = self.layer[1]( UpperCamelCase_ , key_value_states=UpperCamelCase_ , attention_mask=UpperCamelCase_ , ) # Apply Film Conditional Feed Forward layer lowerCAmelCase : Dict = self.layer[-1](UpperCamelCase_ , UpperCamelCase_ ) return (hidden_states,) class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ): super().__init__() lowerCAmelCase : Union[str, Any] = TaLayerNorm(UpperCamelCase_ ) lowerCAmelCase : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = Attention(query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , out_bias=UpperCamelCase_ , scale_qk=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = nn.Dropout(UpperCamelCase_ ) def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Optional[Any]=None , ): # pre_self_attention_layer_norm lowerCAmelCase : List[str] = self.layer_norm(UpperCamelCase_ ) if conditioning_emb is not None: lowerCAmelCase : Optional[Any] = self.FiLMLayer(UpperCamelCase_ , UpperCamelCase_ ) # Self-attention block lowerCAmelCase : int = self.attention(UpperCamelCase_ ) lowerCAmelCase : str = hidden_states + self.dropout(UpperCamelCase_ ) 
return hidden_states class snake_case_( nn.Module ): def __init__( self : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] ): super().__init__() lowerCAmelCase : str = Attention(query_dim=UpperCamelCase_ , heads=UpperCamelCase_ , dim_head=UpperCamelCase_ , out_bias=UpperCamelCase_ , scale_qk=UpperCamelCase_ ) lowerCAmelCase : Tuple = TaLayerNorm(UpperCamelCase_ , eps=UpperCamelCase_ ) lowerCAmelCase : Tuple = nn.Dropout(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : List[str]=None , ): lowerCAmelCase : Optional[Any] = self.layer_norm(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = self.attention( UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , attention_mask=attention_mask.squeeze(1 ) , ) lowerCAmelCase : Tuple = hidden_states + self.dropout(UpperCamelCase_ ) return layer_output class snake_case_( nn.Module ): def __init__( self : str , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str ): super().__init__() lowerCAmelCase : Optional[int] = TaDenseGatedActDense(d_model=UpperCamelCase_ , d_ff=UpperCamelCase_ , dropout_rate=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase_ ) lowerCAmelCase : int = TaLayerNorm(UpperCamelCase_ , eps=UpperCamelCase_ ) lowerCAmelCase : Dict = nn.Dropout(UpperCamelCase_ ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : str , UpperCamelCase_ : List[Any]=None ): lowerCAmelCase : str = self.layer_norm(UpperCamelCase_ ) if conditioning_emb is not None: lowerCAmelCase : Tuple = self.film(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Tuple = self.DenseReluDense(UpperCamelCase_ ) lowerCAmelCase : List[str] = hidden_states + self.dropout(UpperCamelCase_ ) return hidden_states class snake_case_( nn.Module ): def __init__( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[Any] ): super().__init__() lowerCAmelCase : Union[str, Any] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ ) lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ ) lowerCAmelCase : List[str] = nn.Linear(UpperCamelCase_ , UpperCamelCase_ , bias=UpperCamelCase_ ) lowerCAmelCase : Dict = nn.Dropout(UpperCamelCase_ ) lowerCAmelCase : Any = NewGELUActivation() def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : str = self.act(self.wi_a(UpperCamelCase_ ) ) lowerCAmelCase : Tuple = self.wi_a(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = hidden_gelu * hidden_linear lowerCAmelCase : Union[str, Any] = self.dropout(UpperCamelCase_ ) lowerCAmelCase : Dict = self.wo(UpperCamelCase_ ) return hidden_states class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any]=1E-6 ): super().__init__() lowerCAmelCase : Optional[int] = nn.Parameter(torch.ones(UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = eps def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. 
Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 lowerCAmelCase : Optional[Any] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase_ ) lowerCAmelCase : str = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: lowerCAmelCase : Any = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class snake_case_( nn.Module ): def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.Tensor ): return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase_ , 3.0 )) )) class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] ): super().__init__() lowerCAmelCase : Optional[Any] = nn.Linear(UpperCamelCase_ , out_features * 2 , bias=UpperCamelCase_ ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : int , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Tuple = self.scale_bias(UpperCamelCase_ ) lowerCAmelCase, lowerCAmelCase : Optional[int] = torch.chunk(UpperCamelCase_ , 2 , -1 ) lowerCAmelCase : List[str] = x * (1 + scale) + shift return x
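# --- Hedged sketch (illustrative, standalone): the FiLM conditioning used
# throughout the decoder above predicts a per-channel (scale, shift) pair
# from the conditioning embedding and applies x * (1 + scale) + shift,
# broadcast over the sequence dimension.

import torch

d_model, batch, seq = 8, 2, 4
film = torch.nn.Linear(d_model * 4, d_model * 2, bias=False)

x = torch.randn(batch, seq, d_model)          # hidden states
cond = torch.randn(batch, 1, d_model * 4)     # conditioning embedding
scale, shift = torch.chunk(film(cond), 2, dim=-1)
y = x * (1 + scale) + shift                    # shape (batch, seq, d_model)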
"""simple docstring""" def _snake_case ( _snake_case : int , _snake_case : int ): return base * power(_snake_case , (exponent - 1) ) if exponent else 1 if __name__ == "__main__": print('''Raise base to the power of exponent using recursion...''') snake_case__ : Union[str, Any] = int(input('''Enter the base: ''').strip()) snake_case__ : Optional[Any] = int(input('''Enter the exponent: ''').strip()) snake_case__ : Any = power(base, abs(exponent)) if exponent < 0: # power() does not properly deal w/ negative exponents snake_case__ : Dict = 1 / result print(f"""{base} to the power of {exponent} is {result}""")
637
1
"""simple docstring""" import functools from typing import Any def _snake_case ( _snake_case : str , _snake_case : list[str] ): # Validation if not isinstance(_snake_case , _snake_case ) or len(_snake_case ) == 0: raise ValueError('''the string should be not empty string''' ) if not isinstance(_snake_case , _snake_case ) or not all( isinstance(_snake_case , _snake_case ) and len(_snake_case ) > 0 for item in words ): raise ValueError('''the words should be a list of non-empty strings''' ) # Build trie lowerCAmelCase : dict[str, Any] = {} lowerCAmelCase : Tuple = '''WORD_KEEPER''' for word in words: lowerCAmelCase : Dict = trie for c in word: if c not in trie_node: lowerCAmelCase : Dict = {} lowerCAmelCase : Optional[Any] = trie_node[c] lowerCAmelCase : Optional[Any] = True lowerCAmelCase : Union[str, Any] = len(_snake_case ) # Dynamic programming method @functools.cache def is_breakable(_snake_case : int ) -> bool: if index == len_string: return True lowerCAmelCase : Optional[int] = trie for i in range(_snake_case , _snake_case ): lowerCAmelCase : Dict = trie_node.get(string[i] , _snake_case ) if trie_node is None: return False if trie_node.get(_snake_case , _snake_case ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotConfig, is_flax_available from transformers.testing_utils import jax_device, require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html snake_case__ : int = '''platform''' import jax import jax.numpy as jnp from transformers import BlenderbotTokenizer from transformers.models.blenderbot.modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, shift_tokens_right, ) def _snake_case ( _snake_case : str , _snake_case : Any , _snake_case : str=None , _snake_case : str=None , _snake_case : Dict=None , _snake_case : Tuple=None , _snake_case : str=None , _snake_case : Any=None , ): if attention_mask is None: lowerCAmelCase : List[str] = np.where(input_ids != config.pad_token_id , 1 , 0 ) if decoder_attention_mask is None: lowerCAmelCase : Optional[int] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 ) if head_mask is None: lowerCAmelCase : Any = np.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase : int = np.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCAmelCase : List[str] = np.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class snake_case_: def __init__( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : int=1_3 , UpperCamelCase_ : Union[str, Any]=7 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : List[Any]=False , UpperCamelCase_ : Dict=9_9 , UpperCamelCase_ : Optional[int]=1_6 , UpperCamelCase_ : str=2 , UpperCamelCase_ : List[str]=4 , UpperCamelCase_ : List[Any]=4 , UpperCamelCase_ : int="gelu" , UpperCamelCase_ : Optional[int]=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Tuple=1 , UpperCamelCase_ : List[Any]=0 , UpperCamelCase_ : Any=0.02 , ): lowerCAmelCase : Tuple = parent lowerCAmelCase : str = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : Optional[int] = is_training lowerCAmelCase : int = use_labels lowerCAmelCase : List[Any] = vocab_size lowerCAmelCase : str = hidden_size lowerCAmelCase : List[Any] = num_hidden_layers lowerCAmelCase : Any = num_attention_heads lowerCAmelCase : List[Any] = intermediate_size lowerCAmelCase : Optional[int] = hidden_act lowerCAmelCase : Dict = hidden_dropout_prob lowerCAmelCase : Optional[int] = attention_probs_dropout_prob lowerCAmelCase : List[Any] = max_position_embeddings lowerCAmelCase : Union[str, Any] = eos_token_id lowerCAmelCase : Dict = pad_token_id lowerCAmelCase : Optional[Any] = bos_token_id lowerCAmelCase : List[str] = initializer_range def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowerCAmelCase : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) 
, -1 ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Union[str, Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = prepare_blenderbot_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def lowerCamelCase__ ( self : str ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Tuple ): lowerCAmelCase : int = 2_0 lowerCAmelCase : Tuple = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : str = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) lowerCAmelCase : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCAmelCase : List[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : List[str] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Union[str, Any] = model.decode(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any , UpperCamelCase_ : List[str] ): lowerCAmelCase : Optional[int] = 2_0 lowerCAmelCase : List[Any] = model_class_name(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] ) lowerCAmelCase, lowerCAmelCase : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) lowerCAmelCase : str = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCAmelCase : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : str = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) 
lowerCAmelCase : Dict = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) lowerCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase_ , decoder_position_ids=UpperCamelCase_ , ) lowerCAmelCase : Dict = model.decode(UpperCamelCase_ , UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ ) lowerCAmelCase : Any = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class snake_case_( unittest.TestCase ): __UpperCamelCase = 99 def lowerCamelCase__ ( self : str ): lowerCAmelCase : List[Any] = np.array( [ [7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2], [5, 9_7, 1_7, 3_9, 9_4, 4_0, 2], [7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2], [8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2], [5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding [6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2], [5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2], [4_8, 6_1, 9, 2_4, 7_1, 8_2, 2], [2_6, 1, 6_0, 4_8, 2_2, 1_3, 2], [2_1, 5, 6_2, 2_8, 1_4, 7_6, 2], [4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2], [7_0, 7_0, 5_0, 9, 2_8, 0, 2], ] , dtype=np.intaa , ) lowerCAmelCase : List[Any] = input_ids.shape[0] lowerCAmelCase : Optional[Any] = BlenderbotConfig( vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Any = self._get_config_and_data() lowerCAmelCase : Any = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[int] = lm_model(input_ids=UpperCamelCase_ ) lowerCAmelCase : Tuple = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : Any ): lowerCAmelCase : Any = BlenderbotConfig( vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , ) lowerCAmelCase : int = FlaxBlenderbotForConditionalGeneration(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa ) lowerCAmelCase : List[str] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa ) lowerCAmelCase : List[Any] = lm_model(input_ids=UpperCamelCase_ , decoder_input_ids=UpperCamelCase_ ) lowerCAmelCase : str = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , UpperCamelCase_ ) def lowerCamelCase__ ( self : int ): lowerCAmelCase : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa ) lowerCAmelCase : Tuple = shift_tokens_right(UpperCamelCase_ , 1 , 2 ) lowerCAmelCase : Optional[int] = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() lowerCAmelCase : str = np.equal(UpperCamelCase_ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) 
self.assertEqual(UpperCamelCase_ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class snake_case_( a__ , unittest.TestCase , a__ ): __UpperCamelCase = True __UpperCamelCase = ( ( FlaxBlenderbotModel, FlaxBlenderbotForConditionalGeneration, ) if is_flax_available() else () ) __UpperCamelCase = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else () def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : Any = FlaxBlenderbotModelTester(self ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase, lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Optional[int] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ ) @jax.jit def encode_jitted(UpperCamelCase_ : List[str] , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : List[str] ): return model.encode(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : List[str] = encode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : int = encode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase : Tuple = model_class(UpperCamelCase_ ) lowerCAmelCase : int = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) lowerCAmelCase : List[Any] = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : int ): return model.decode( decoder_input_ids=UpperCamelCase_ , decoder_attention_mask=UpperCamelCase_ , encoder_outputs=UpperCamelCase_ , ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase : str = decode_jitted(**UpperCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase_ ).to_tuple() self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Optional[int] ): for model_class_name in self.all_model_classes: lowerCAmelCase : Optional[int] = 
model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowerCAmelCase : int = np.ones((1, 1) ) * model.config.eos_token_id lowerCAmelCase : List[str] = model(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 1_5, '''max_length''': 2_5} lowerCAmelCase : List[str] = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True} lowerCAmelCase : Tuple = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' ) lowerCAmelCase : List[Any] = ['''Sam'''] lowerCAmelCase : str = tokenizer(UpperCamelCase_ , return_tensors='''jax''' ) lowerCAmelCase : Union[str, Any] = model.generate(**UpperCamelCase_ , **UpperCamelCase_ ) lowerCAmelCase : Tuple = '''Sam is a great name. It means "sun" in Gaelic.''' lowerCAmelCase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , **UpperCamelCase_ ) assert generated_txt[0].strip() == tgt_text
637
1
"""simple docstring""" def _snake_case ( _snake_case : str ): if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] lowerCAmelCase : Optional[Any] = grid[0] for row_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ): lowerCAmelCase : Union[str, Any] = grid[row_n] lowerCAmelCase : Optional[int] = fill_row(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase : Union[str, Any] = grid[row_n] return grid[-1][-1] def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ): current_row[0] += row_above[0] for cell_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
700
"""simple docstring""" from __future__ import annotations from PIL import Image # Define glider example snake_case__ : int = [ [0, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0], ] # Define blinker example snake_case__ : Any = [[0, 1, 0], [0, 1, 0], [0, 1, 0]] def _snake_case ( _snake_case : list[list[int]] ): lowerCAmelCase : Union[str, Any] = [] for i in range(len(_snake_case ) ): lowerCAmelCase : Any = [] for j in range(len(cells[i] ) ): # Get the number of live neighbours lowerCAmelCase : Optional[int] = 0 if i > 0 and j > 0: neighbour_count += cells[i - 1][j - 1] if i > 0: neighbour_count += cells[i - 1][j] if i > 0 and j < len(cells[i] ) - 1: neighbour_count += cells[i - 1][j + 1] if j > 0: neighbour_count += cells[i][j - 1] if j < len(cells[i] ) - 1: neighbour_count += cells[i][j + 1] if i < len(_snake_case ) - 1 and j > 0: neighbour_count += cells[i + 1][j - 1] if i < len(_snake_case ) - 1: neighbour_count += cells[i + 1][j] if i < len(_snake_case ) - 1 and j < len(cells[i] ) - 1: neighbour_count += cells[i + 1][j + 1] # Rules of the game of life (excerpt from Wikipedia): # 1. Any live cell with two or three live neighbours survives. # 2. Any dead cell with three live neighbours becomes a live cell. # 3. All other live cells die in the next generation. # Similarly, all other dead cells stay dead. lowerCAmelCase : str = cells[i][j] == 1 if ( (alive and 2 <= neighbour_count <= 3) or not alive and neighbour_count == 3 ): next_generation_row.append(1 ) else: next_generation_row.append(0 ) next_generation.append(_snake_case ) return next_generation def _snake_case ( _snake_case : list[list[int]] , _snake_case : int ): lowerCAmelCase : int = [] for _ in range(_snake_case ): # Create output image lowerCAmelCase : Union[str, Any] = Image.new('''RGB''' , (len(cells[0] ), len(_snake_case )) ) lowerCAmelCase : Union[str, Any] = img.load() # Save cells to image for x in range(len(_snake_case ) ): for y in range(len(cells[0] ) ): lowerCAmelCase : Optional[int] = 255 - cells[y][x] * 255 lowerCAmelCase : List[Any] = (colour, colour, colour) # Save image images.append(_snake_case ) lowerCAmelCase : Union[str, Any] = new_generation(_snake_case ) return images if __name__ == "__main__": snake_case__ : Union[str, Any] = generate_images(GLIDER, 16) images[0].save('''out.gif''', save_all=True, append_images=images[1:])
637
0
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


# The class name was obfuscated in the dump; the attribute/processor-class
# combination matches transformers' VisionTextDualEncoderProcessor, so that
# name is restored here as an inference.
class VisionTextDualEncoderProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
701
"""simple docstring""" from __future__ import annotations class snake_case_: def __init__( self : int , UpperCamelCase_ : str , UpperCamelCase_ : str ): lowerCAmelCase, lowerCAmelCase : List[str] = text, pattern lowerCAmelCase, lowerCAmelCase : Union[str, Any] = len(UpperCamelCase_ ), len(UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : str ): for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int ): for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase__ ( self : Dict ): # searches pattern in text and returns index positions lowerCAmelCase : Union[str, Any] = [] for i in range(self.textLen - self.patLen + 1 ): lowerCAmelCase : str = self.mismatch_in_text(UpperCamelCase_ ) if mismatch_index == -1: positions.append(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = self.match_in_pattern(self.text[mismatch_index] ) lowerCAmelCase : int = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions snake_case__ : str = '''ABAABA''' snake_case__ : List[str] = '''AB''' snake_case__ : Union[str, Any] = BoyerMooreSearch(text, pattern) snake_case__ : Optional[Any] = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
637
0
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 snake_case__ : List[str] = data_utils.TransfoXLTokenizer snake_case__ : Optional[int] = data_utils.TransfoXLCorpus snake_case__ : Optional[int] = data_utils snake_case__ : int = data_utils def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : Tuple ): if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(_SCREAMING_SNAKE_CASE , '''rb''' ) as fp: lowerCAmelCase : List[Any] = pickle.load(_SCREAMING_SNAKE_CASE , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCAmelCase : int = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' ) lowerCAmelCase : Optional[Any] = corpus.vocab.__dict__ torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase : int = corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , _SCREAMING_SNAKE_CASE ) lowerCAmelCase : Dict = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(f'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCAmelCase : int = os.path.abspath(_SCREAMING_SNAKE_CASE ) lowerCAmelCase : Dict = os.path.abspath(_SCREAMING_SNAKE_CASE ) print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCAmelCase : Optional[Any] = TransfoXLConfig() else: lowerCAmelCase : List[Any] = TransfoXLConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(f'''Building PyTorch model from configuration: {config}''' ) lowerCAmelCase : Optional[int] = TransfoXLLMHeadModel(_SCREAMING_SNAKE_CASE ) lowerCAmelCase : List[str] = load_tf_weights_in_transfo_xl(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model lowerCAmelCase : int = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase : str = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'''Save PyTorch model to {os.path.abspath(_SCREAMING_SNAKE_CASE )}''' ) torch.save(model.state_dict() , _SCREAMING_SNAKE_CASE ) print(f'''Save configuration file to {os.path.abspath(_SCREAMING_SNAKE_CASE )}''' ) with open(_SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": snake_case__ : List[str] = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) 
parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) snake_case__ : Union[str, Any] = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
702
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case_( a__ ): pass class snake_case_: def __init__( self : Any , UpperCamelCase_ : Any ): lowerCAmelCase : Any = data lowerCAmelCase : Node | None = None def __iter__( self : int ): lowerCAmelCase : Any = self lowerCAmelCase : Union[str, Any] = [] while node: if node in visited: raise ContainsLoopError visited.append(UpperCamelCase_ ) yield node.data lowerCAmelCase : Optional[int] = node.next_node @property def lowerCamelCase__ ( self : str ): try: list(self ) return False except ContainsLoopError: return True if __name__ == "__main__": snake_case__ : Dict = Node(1) snake_case__ : Any = Node(2) snake_case__ : int = Node(3) snake_case__ : Any = Node(4) print(root_node.has_loop) # False snake_case__ : Tuple = root_node.next_node print(root_node.has_loop) # True snake_case__ : List[Any] = Node(5) snake_case__ : int = Node(6) snake_case__ : List[Any] = Node(5) snake_case__ : Dict = Node(6) print(root_node.has_loop) # False snake_case__ : Any = Node(1) print(root_node.has_loop) # False
637
0
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print('''Googling.....''') snake_case__ : Optional[Any] = "https://www.google.com/search?q=" + " ".join(sys.argv[1:]) snake_case__ : List[str] = requests.get(url, headers={'''UserAgent''': UserAgent().random}) # res.raise_for_status() with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) snake_case__ : List[str] = BeautifulSoup(res.text, '''html.parser''') snake_case__ : List[str] = list(soup.select('''.eZt8xd'''))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get('''href''')) else: webbrowser.open(f"""https://google.com{link.get("href")}""")
703
"""simple docstring""" from torch import nn class snake_case_( nn.Module ): def __init__( self : int , UpperCamelCase_ : int , UpperCamelCase_ : int ): super().__init__() lowerCAmelCase : str = class_size lowerCAmelCase : Dict = embed_size # self.mlp1 = nn.Linear(embed_size, embed_size) # self.mlp2 = (nn.Linear(embed_size, class_size)) lowerCAmelCase : Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Tuple ): # hidden_state = nn.functional.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) lowerCAmelCase : int = self.mlp(UpperCamelCase_ ) return logits
637
0
"""simple docstring""" from __future__ import annotations from collections.abc import Sequence from typing import Literal def _snake_case ( _snake_case : Optional[int] , _snake_case : Any ): lowerCAmelCase : str = list(_lowercase ) lowerCAmelCase : int = list(_lowercase ) lowerCAmelCase : Tuple = 0 for i in range(len(_lowercase ) ): if lista[i] != lista[i]: count += 1 lowerCAmelCase : str = '_' if count > 1: return False else: return "".join(_lowercase ) def _snake_case ( _snake_case : Any ): lowerCAmelCase : int = [] while True: lowerCAmelCase : Dict = ['$'] * len(_lowercase ) lowerCAmelCase : Dict = [] for i in range(len(_lowercase ) ): for j in range(i + 1 , len(_lowercase ) ): lowerCAmelCase : Union[str, Any] = compare_string(binary[i] , binary[j] ) if k is False: lowerCAmelCase : Union[str, Any] = '*' lowerCAmelCase : Optional[Any] = '*' temp.append('''X''' ) for i in range(len(_lowercase ) ): if checka[i] == "$": pi.append(binary[i] ) if len(_lowercase ) == 0: return pi lowerCAmelCase : Any = list(set(_lowercase ) ) def _snake_case ( _snake_case : Tuple , _snake_case : Any ): lowerCAmelCase : Optional[int] = [] for minterm in minterms: lowerCAmelCase : Tuple = '' for _ in range(_lowercase ): lowerCAmelCase : Tuple = str(minterm % 2 ) + string minterm //= 2 temp.append(_lowercase ) return temp def _snake_case ( _snake_case : Tuple , _snake_case : Any , _snake_case : Tuple ): lowerCAmelCase : int = list(_lowercase ) lowerCAmelCase : List[Any] = list(_lowercase ) lowerCAmelCase : Any = 0 for i in range(len(_lowercase ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def _snake_case ( _snake_case : str , _snake_case : Optional[Any] ): lowerCAmelCase : Optional[int] = [] lowerCAmelCase : Tuple = [0] * len(_lowercase ) for i in range(len(chart[0] ) ): lowerCAmelCase : Tuple = 0 lowerCAmelCase : Optional[int] = -1 for j in range(len(_lowercase ) ): if chart[j][i] == 1: count += 1 lowerCAmelCase : List[Any] = j if count == 1: lowerCAmelCase : Union[str, Any] = 1 for i in range(len(_lowercase ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(_lowercase ) ): lowerCAmelCase : Union[str, Any] = 0 temp.append(prime_implicants[i] ) while True: lowerCAmelCase : Tuple = 0 lowerCAmelCase : Tuple = -1 lowerCAmelCase : Tuple = 0 for i in range(len(_lowercase ) ): lowerCAmelCase : int = chart[i].count(1 ) if count_n > max_n: lowerCAmelCase : Any = count_n lowerCAmelCase : List[Any] = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(_lowercase ) ): lowerCAmelCase : Any = 0 def _snake_case ( _snake_case : List[Any] , _snake_case : int ): lowerCAmelCase : Optional[Any] = [[0 for x in range(len(_lowercase ) )] for x in range(len(_lowercase ) )] for i in range(len(_lowercase ) ): lowerCAmelCase : int = prime_implicants[i].count('''_''' ) for j in range(len(_lowercase ) ): if is_for_table(prime_implicants[i] , binary[j] , _lowercase ): lowerCAmelCase : List[str] = 1 return chart def _snake_case ( ): lowerCAmelCase : Tuple = int(input('''Enter the no. 
of variables\n''' ) ) lowerCAmelCase : Optional[int] = [ float(_lowercase ) for x in input( '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split() ] lowerCAmelCase : Dict = decimal_to_binary(_lowercase , _lowercase ) lowerCAmelCase : List[Any] = check(_lowercase ) print('''Prime Implicants are:''' ) print(_lowercase ) lowerCAmelCase : str = prime_implicant_chart(_lowercase , _lowercase ) lowerCAmelCase : Dict = selection(_lowercase , _lowercase ) print('''Essential Prime Implicants are:''' ) print(_lowercase ) if __name__ == "__main__": import doctest doctest.testmod() main()
704
"""simple docstring""" class snake_case_: def __init__( self : Union[str, Any] , UpperCamelCase_ : str ): lowerCAmelCase : Dict = val lowerCAmelCase : str = None lowerCAmelCase : Dict = None def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ): if self.val: if val < self.val: if self.left is None: lowerCAmelCase : int = Node(UpperCamelCase_ ) else: self.left.insert(UpperCamelCase_ ) elif val > self.val: if self.right is None: lowerCAmelCase : Any = Node(UpperCamelCase_ ) else: self.right.insert(UpperCamelCase_ ) else: lowerCAmelCase : Optional[Any] = val def _snake_case ( _snake_case : Tuple , _snake_case : str ): # Recursive traversal if root: inorder(root.left , _snake_case ) res.append(root.val ) inorder(root.right , _snake_case ) def _snake_case ( _snake_case : Optional[Any] ): # Build BST if len(_snake_case ) == 0: return arr lowerCAmelCase : Optional[Any] = Node(arr[0] ) for i in range(1 , len(_snake_case ) ): root.insert(arr[i] ) # Traverse BST in order. lowerCAmelCase : Optional[int] = [] inorder(_snake_case , _snake_case ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
637
0
"""simple docstring""" import functools def _snake_case ( _snake_case : list[int] , _snake_case : list[int] ): if not isinstance(_A , _A ) or not all(isinstance(_A , _A ) for day in days ): raise ValueError('''The parameter days should be a list of integers''' ) if len(_A ) != 3 or not all(isinstance(_A , _A ) for cost in costs ): raise ValueError('''The parameter costs should be a list of three integers''' ) if len(_A ) == 0: return 0 if min(_A ) <= 0: raise ValueError('''All days elements should be greater than 0''' ) if max(_A ) >= 366: raise ValueError('''All days elements should be less than 366''' ) lowerCAmelCase : str = set(_A ) @functools.cache def dynamic_programming(_snake_case : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
705
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case__ : Tuple = logging.get_logger(__name__) snake_case__ : int = { '''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''', # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case_( a__ ): __UpperCamelCase = '''levit''' def __init__( self : str , UpperCamelCase_ : Union[str, Any]=2_2_4 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : Union[str, Any]=3 , UpperCamelCase_ : int=2 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : Tuple=1_6 , UpperCamelCase_ : Dict=[1_2_8, 2_5_6, 3_8_4] , UpperCamelCase_ : Optional[Any]=[4, 8, 1_2] , UpperCamelCase_ : Dict=[4, 4, 4] , UpperCamelCase_ : Any=[1_6, 1_6, 1_6] , UpperCamelCase_ : str=0 , UpperCamelCase_ : int=[2, 2, 2] , UpperCamelCase_ : Optional[Any]=[2, 2, 2] , UpperCamelCase_ : str=0.02 , **UpperCamelCase_ : List[str] , ): super().__init__(**UpperCamelCase_ ) lowerCAmelCase : Tuple = image_size lowerCAmelCase : int = num_channels lowerCAmelCase : Optional[int] = kernel_size lowerCAmelCase : Dict = stride lowerCAmelCase : List[Any] = padding lowerCAmelCase : Dict = hidden_sizes lowerCAmelCase : List[str] = num_attention_heads lowerCAmelCase : Tuple = depths lowerCAmelCase : Dict = key_dim lowerCAmelCase : Union[str, Any] = drop_path_rate lowerCAmelCase : List[Any] = patch_size lowerCAmelCase : Tuple = attention_ratio lowerCAmelCase : Optional[int] = mlp_ratio lowerCAmelCase : Union[str, Any] = initializer_range lowerCAmelCase : List[str] = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case_( a__ ): __UpperCamelCase = version.parse('''1.11''' ) @property def lowerCamelCase__ ( self : Tuple ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowerCamelCase__ ( self : Optional[Any] ): return 1E-4
637
0
"""simple docstring""" def _snake_case ( _snake_case : Optional[int] ): lowerCAmelCase : List[str] = set() # edges = list of graph's edges lowerCAmelCase : Optional[int] = get_edges(lowerCamelCase_ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: lowerCAmelCase : List[str] = edges.pop() chosen_vertices.add(lowerCamelCase_ ) chosen_vertices.add(lowerCamelCase_ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(lowerCamelCase_ ) return chosen_vertices def _snake_case ( _snake_case : Optional[Any] ): lowerCAmelCase : Optional[int] = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
706
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : int , UpperCamelCase_ : int ): lowerCAmelCase : str = 3 lowerCAmelCase : Tuple = 2_5_0 lowerCAmelCase : Optional[Any] = ids_tensor((batch_size, length) , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = torch.ones((batch_size, length) , device=UpperCamelCase_ , dtype=torch.float ) / length return input_ids, scores def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) lowerCAmelCase : Union[str, Any] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=1_0 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Any = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Optional[Any] = MaxLengthCriteria(max_length=1_0 ) lowerCAmelCase, lowerCAmelCase : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : List[str] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Optional[Any] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase, lowerCAmelCase : Optional[int] = self._get_tensors(5 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self._get_tensors(9 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase, lowerCAmelCase : str = self._get_tensors(1_0 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : Dict = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 1_0 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self._get_tensors(5 ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) lowerCAmelCase : List[str] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(UpperCamelCase_ , UpperCamelCase_ ) ) def lowerCamelCase__ ( self : str ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 ) with self.assertWarns(UpperCamelCase_ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 ) lowerCAmelCase : str = validate_stopping_criteria(StoppingCriteriaList() , 1_1 ) self.assertEqual(len(UpperCamelCase_ ) , 1 )
637
0
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging snake_case__ : int = logging.get_logger(__name__) snake_case__ : Optional[int] = { "CarlCochet/trajectory-transformer-halfcheetah-medium-v2": ( "https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json" ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class snake_case_( UpperCAmelCase_ ): __UpperCamelCase = 'trajectory_transformer' __UpperCamelCase = ['past_key_values'] __UpperCamelCase = { 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : Optional[int] , UpperCamelCase_ : Union[str, Any]=1_0_0 , UpperCamelCase_ : str=5 , UpperCamelCase_ : Union[str, Any]=1 , UpperCamelCase_ : List[str]=1 , UpperCamelCase_ : List[str]=2_4_9 , UpperCamelCase_ : Optional[int]=6 , UpperCamelCase_ : Any=1_7 , UpperCamelCase_ : int=2_5 , UpperCamelCase_ : Tuple=4 , UpperCamelCase_ : int=4 , UpperCamelCase_ : List[str]=1_2_8 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : List[Any]=0.1 , UpperCamelCase_ : Tuple=0.0_006 , UpperCamelCase_ : Optional[Any]=5_1_2 , UpperCamelCase_ : List[str]=0.02 , UpperCamelCase_ : List[str]=1E-12 , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : List[str]=True , UpperCamelCase_ : Optional[int]=1 , UpperCamelCase_ : Any=5_0_2_5_6 , UpperCamelCase_ : Union[str, Any]=5_0_2_5_6 , **UpperCamelCase_ : str , ): lowerCAmelCase : Optional[Any] = vocab_size lowerCAmelCase : Union[str, Any] = action_weight lowerCAmelCase : Any = reward_weight lowerCAmelCase : str = value_weight lowerCAmelCase : List[str] = max_position_embeddings lowerCAmelCase : Dict = block_size lowerCAmelCase : Union[str, Any] = action_dim lowerCAmelCase : Tuple = observation_dim lowerCAmelCase : Any = transition_dim lowerCAmelCase : Optional[int] = learning_rate lowerCAmelCase : Optional[int] = n_layer lowerCAmelCase : Tuple = n_head lowerCAmelCase : int = n_embd lowerCAmelCase : List[str] = embd_pdrop lowerCAmelCase : Optional[Any] = attn_pdrop lowerCAmelCase : Tuple = resid_pdrop lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : List[str] = layer_norm_eps lowerCAmelCase : int = kaiming_initializer_range lowerCAmelCase : int = use_cache super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
707
"""simple docstring""" import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class snake_case_( a__ ): __UpperCamelCase = 42 __UpperCamelCase = None def _snake_case ( _snake_case : Dict , _snake_case : List[str]=0.999 , _snake_case : Dict="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_snake_case : List[Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_snake_case : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowerCAmelCase : List[Any] = [] for i in range(_snake_case ): lowerCAmelCase : int = i / num_diffusion_timesteps lowerCAmelCase : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(_snake_case ) / alpha_bar_fn(_snake_case ) , _snake_case ) ) return torch.tensor(_snake_case , dtype=torch.floataa ) class snake_case_( a__ , a__ ): @register_to_config def __init__( self : Any , UpperCamelCase_ : int = 1_0_0_0 , UpperCamelCase_ : str = "fixed_small_log" , UpperCamelCase_ : bool = True , UpperCamelCase_ : Optional[float] = 1.0 , UpperCamelCase_ : str = "epsilon" , UpperCamelCase_ : str = "squaredcos_cap_v2" , ): if beta_schedule != "squaredcos_cap_v2": raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' ) lowerCAmelCase : Any = betas_for_alpha_bar(UpperCamelCase_ ) lowerCAmelCase : str = 1.0 - self.betas lowerCAmelCase : Union[str, Any] = torch.cumprod(self.alphas , dim=0 ) lowerCAmelCase : Tuple = torch.tensor(1.0 ) # standard deviation of the initial noise distribution lowerCAmelCase : Any = 1.0 # setable values lowerCAmelCase : Any = None lowerCAmelCase : Any = torch.from_numpy(np.arange(0 , UpperCamelCase_ )[::-1].copy() ) lowerCAmelCase : List[str] = variance_type def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ): return sample def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Union[str, torch.device] = None ): lowerCAmelCase : Any = num_inference_steps lowerCAmelCase : str = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) lowerCAmelCase : Tuple = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa ) lowerCAmelCase : Any = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Any=None ): if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : int = self.alphas_cumprod[t] lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : Dict = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : Tuple = self.betas[t] else: lowerCAmelCase : Union[str, Any] = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add 
variance to pred_sample lowerCAmelCase : Optional[Any] = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: lowerCAmelCase : List[str] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": lowerCAmelCase : Any = torch.log(torch.clamp(UpperCamelCase_ , min=1E-20 ) ) lowerCAmelCase : Union[str, Any] = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler lowerCAmelCase : Optional[Any] = variance.log() lowerCAmelCase : Union[str, Any] = beta.log() lowerCAmelCase : Dict = (predicted_variance + 1) / 2 lowerCAmelCase : Union[str, Any] = frac * max_log + (1 - frac) * min_log return variance def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : bool = True , ): lowerCAmelCase : Optional[Any] = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": lowerCAmelCase, lowerCAmelCase : List[Any] = torch.split(UpperCamelCase_ , sample.shape[1] , dim=1 ) else: lowerCAmelCase : Optional[int] = None # 1. compute alphas, betas if prev_timestep is None: lowerCAmelCase : Any = t - 1 lowerCAmelCase : Union[str, Any] = self.alphas_cumprod[t] lowerCAmelCase : Optional[int] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one lowerCAmelCase : int = 1 - alpha_prod_t lowerCAmelCase : str = 1 - alpha_prod_t_prev if prev_timestep == t - 1: lowerCAmelCase : List[Any] = self.betas[t] lowerCAmelCase : Optional[int] = self.alphas[t] else: lowerCAmelCase : List[Any] = 1 - alpha_prod_t / alpha_prod_t_prev lowerCAmelCase : Dict = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": lowerCAmelCase : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": lowerCAmelCase : Tuple = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`''' ''' for the UnCLIPScheduler.''' ) # 3. Clip "predicted x_0" if self.config.clip_sample: lowerCAmelCase : Dict = torch.clamp( UpperCamelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : int = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t lowerCAmelCase : List[Any] = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf lowerCAmelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise lowerCAmelCase : int = 0 if t > 0: lowerCAmelCase : Union[str, Any] = randn_tensor( model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase_ , device=model_output.device ) lowerCAmelCase : Any = self._get_variance( UpperCamelCase_ , predicted_variance=UpperCamelCase_ , prev_timestep=UpperCamelCase_ , ) if self.variance_type == "fixed_small_log": lowerCAmelCase : str = variance elif self.variance_type == "learned_range": lowerCAmelCase : Optional[Any] = (0.5 * variance).exp() else: raise ValueError( F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`''' ''' for the UnCLIPScheduler.''' ) lowerCAmelCase : List[Any] = variance * variance_noise lowerCAmelCase : int = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase_ , pred_original_sample=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.IntTensor , ): # Make sure alphas_cumprod and timestep have same device and dtype as original_samples lowerCAmelCase : Tuple = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype ) lowerCAmelCase : int = timesteps.to(original_samples.device ) lowerCAmelCase : Dict = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase : str = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : Any = sqrt_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : List[str] = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase : Tuple = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): lowerCAmelCase : int = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) lowerCAmelCase : Dict = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
637
0
"""simple docstring""" import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors snake_case__ : Any = logging.getLogger(__name__) class snake_case_( __A ): __UpperCamelCase = '''sequence-classification''' def __init__( self : Any , UpperCamelCase_ : Optional[int] ): if type(UpperCamelCase_ ) == dict: lowerCAmelCase : Any = Namespace(**UpperCamelCase_ ) lowerCAmelCase : int = glue_output_modes[hparams.task] lowerCAmelCase : Any = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase_ , UpperCamelCase_ , self.mode ) def lowerCamelCase__ ( self : Dict , **UpperCamelCase_ : Any ): return self.model(**UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Optional[int] = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase : str = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None lowerCAmelCase : Union[str, Any] = self(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = outputs[0] lowerCAmelCase : str = self.trainer.lr_schedulers[0]['''scheduler'''] lowerCAmelCase : Union[str, Any] = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Union[str, Any] = self.hparams lowerCAmelCase : str = processors[args.task]() lowerCAmelCase : Optional[int] = processor.get_labels() for mode in ["train", "dev"]: lowerCAmelCase : List[str] = self._feature_file(UpperCamelCase_ ) if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache: logger.info('''Loading features from cached file %s''' , UpperCamelCase_ ) else: logger.info('''Creating features from dataset file at %s''' , args.data_dir ) lowerCAmelCase : Optional[int] = ( processor.get_dev_examples(args.data_dir ) if mode == '''dev''' else processor.get_train_examples(args.data_dir ) ) lowerCAmelCase : Optional[int] = convert_examples_to_features( UpperCamelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info('''Saving features into cached file %s''' , UpperCamelCase_ ) torch.save(UpperCamelCase_ , UpperCamelCase_ ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : bool = False ): lowerCAmelCase : Tuple = '''dev''' if mode == '''test''' else mode lowerCAmelCase : Any = self._feature_file(UpperCamelCase_ ) logger.info('''Loading features from cached file %s''' , UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = torch.load(UpperCamelCase_ ) lowerCAmelCase : int = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) lowerCAmelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) lowerCAmelCase : Tuple = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": lowerCAmelCase 
: int = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase : int = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , batch_size=UpperCamelCase_ , shuffle=UpperCamelCase_ , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict ): lowerCAmelCase : Any = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]} if self.config.model_type not in ["distilbert", "bart"]: lowerCAmelCase : Any = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None lowerCAmelCase : Union[str, Any] = self(**UpperCamelCase_ ) lowerCAmelCase : List[Any] = outputs[:2] lowerCAmelCase : Union[str, Any] = logits.detach().cpu().numpy() lowerCAmelCase : int = inputs['''labels'''].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Dict ): lowerCAmelCase : Optional[Any] = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item() lowerCAmelCase : List[str] = np.concatenate([x['''pred'''] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": lowerCAmelCase : Optional[Any] = np.argmax(UpperCamelCase_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": lowerCAmelCase : int = np.squeeze(UpperCamelCase_ ) lowerCAmelCase : Any = np.concatenate([x['''target'''] for x in outputs] , axis=0 ) lowerCAmelCase : int = [[] for _ in range(out_label_ids.shape[0] )] lowerCAmelCase : Tuple = [[] for _ in range(out_label_ids.shape[0] )] lowerCAmelCase : Any = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase_ , UpperCamelCase_ )} lowerCAmelCase : Optional[Any] = dict(results.items() ) lowerCAmelCase : str = results return ret, preds_list, out_label_list def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : list ): lowerCAmelCase : Dict = self._eval_end(UpperCamelCase_ ) lowerCAmelCase : Any = ret['''log'''] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple ): lowerCAmelCase : Union[str, Any] = self._eval_end(UpperCamelCase_ ) lowerCAmelCase : Optional[Any] = ret['''log'''] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def lowerCamelCase__ ( UpperCamelCase_ : str , UpperCamelCase_ : Any ): BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_ ) parser.add_argument( '''--max_seq_length''' , default=1_2_8 , type=UpperCamelCase_ , help=( '''The maximum total input sequence length after tokenization. 
Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) , ) parser.add_argument( '''--task''' , default='''''' , type=UpperCamelCase_ , required=UpperCamelCase_ , help='''The GLUE task to run''' , ) parser.add_argument( '''--gpus''' , default=0 , type=UpperCamelCase_ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) return parser def _snake_case ( ): lowerCAmelCase : Tuple = argparse.ArgumentParser() add_generic_args(__A , os.getcwd() ) lowerCAmelCase : Any = GLUETransformer.add_model_specific_args(__A , os.getcwd() ) lowerCAmelCase : Optional[Any] = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: lowerCAmelCase : Tuple = os.path.join( '''./results''' , f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' , ) os.makedirs(args.output_dir ) lowerCAmelCase : str = GLUETransformer(__A ) lowerCAmelCase : Any = generic_train(__A , __A ) # Optionally, predict on dev set and write to output_dir if args.do_predict: lowerCAmelCase : Tuple = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=__A ) ) lowerCAmelCase : int = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(__A ) if __name__ == "__main__": main()
708
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class snake_case_: def __init__( self : int , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Optional[Any]=7 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : Tuple=3_2 , UpperCamelCase_ : Optional[Any]=5 , UpperCamelCase_ : str=4 , UpperCamelCase_ : Any=3_7 , UpperCamelCase_ : Optional[Any]="gelu" , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Union[str, Any]=0.1 , UpperCamelCase_ : Union[str, Any]=5_1_2 , UpperCamelCase_ : Union[str, Any]=1_6 , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Optional[Any]=0.02 , UpperCamelCase_ : List[Any]=3 , UpperCamelCase_ : Any=4 , UpperCamelCase_ : int=None , ): lowerCAmelCase : Any = parent lowerCAmelCase : Any = batch_size lowerCAmelCase : List[Any] = seq_length lowerCAmelCase : str = is_training lowerCAmelCase : List[Any] = use_input_mask lowerCAmelCase : Optional[int] = use_token_type_ids lowerCAmelCase : Union[str, Any] = use_labels lowerCAmelCase : List[str] = vocab_size lowerCAmelCase : Tuple = hidden_size lowerCAmelCase : int = num_hidden_layers lowerCAmelCase : Union[str, Any] = num_attention_heads lowerCAmelCase : Optional[int] = intermediate_size lowerCAmelCase : List[Any] = hidden_act lowerCAmelCase : int = hidden_dropout_prob lowerCAmelCase : Tuple = attention_probs_dropout_prob lowerCAmelCase : Optional[Any] = max_position_embeddings lowerCAmelCase : Optional[int] = type_vocab_size lowerCAmelCase : Tuple = type_sequence_label_size lowerCAmelCase : List[str] = initializer_range lowerCAmelCase : str = num_labels lowerCAmelCase : Optional[int] = num_choices lowerCAmelCase : Tuple = scope def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase : Tuple = None if self.use_input_mask: lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase : List[str] = None if self.use_token_type_ids: lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase : int = None lowerCAmelCase : int = None lowerCAmelCase : Tuple = None if self.use_labels: lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Tuple ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , 
intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase : List[Any] = LlamaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : Any , ): lowerCAmelCase : Tuple = True lowerCAmelCase : Optional[int] = LlamaModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , ) lowerCAmelCase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : str , ): lowerCAmelCase : Optional[Any] = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[str] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Union[str, Any] = True lowerCAmelCase : str = True lowerCAmelCase : Tuple = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # first forward pass lowerCAmelCase : Optional[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , ) lowerCAmelCase : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowerCAmelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and lowerCAmelCase : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCAmelCase : List[str] = torch.cat([input_mask, next_mask] , dim=-1 
) lowerCAmelCase : Dict = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] lowerCAmelCase : str = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )['''hidden_states'''][0] # select random slice lowerCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCAmelCase : Any = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCAmelCase : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Dict = self.prepare_config_and_inputs() ( ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ( lowerCAmelCase ), ) : Tuple = config_and_inputs lowerCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class snake_case_( a__ , a__ , a__ , unittest.TestCase ): __UpperCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __UpperCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __UpperCamelCase = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def lowerCamelCase__ ( self : Optional[Any] ): lowerCAmelCase : Any = LlamaModelTester(self ) lowerCAmelCase : Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=3_7 ) def lowerCamelCase__ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCAmelCase : str = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : List[str] = 3 lowerCAmelCase : List[str] = input_dict['''input_ids'''] lowerCAmelCase : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Union[str, Any] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase, lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : 
int = '''single_label_classification''' lowerCAmelCase : Tuple = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCAmelCase : Tuple = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Any = 3 lowerCAmelCase : Dict = '''multi_label_classification''' lowerCAmelCase : Union[str, Any] = input_dict['''input_ids'''] lowerCAmelCase : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) lowerCAmelCase : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCAmelCase : Optional[int] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() lowerCAmelCase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' ) def lowerCamelCase__ ( self : Optional[Any] ): pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : Tuple ): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase : Optional[int] = ids_tensor([1, 1_0] , config.vocab_size ) lowerCAmelCase : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : List[Any] = LlamaModel(UpperCamelCase_ ) original_model.to(UpperCamelCase_ ) original_model.eval() lowerCAmelCase : Optional[int] = original_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : List[Any] = original_model(UpperCamelCase_ ).last_hidden_state set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights lowerCAmelCase : int = {'''type''': scaling_type, '''factor''': 10.0} lowerCAmelCase : List[str] = LlamaModel(UpperCamelCase_ ) scaled_model.to(UpperCamelCase_ ) scaled_model.eval() lowerCAmelCase : Union[str, Any] = scaled_model(UpperCamelCase_ ).last_hidden_state lowerCAmelCase : Optional[int] = scaled_model(UpperCamelCase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) @require_torch class snake_case_( unittest.TestCase ): @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : Tuple = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 lowerCAmelCase : int = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Dict = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' ) lowerCAmelCase : str = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : Any = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Tuple = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' ) @slow def lowerCamelCase__ ( self : Optional[int] ): lowerCAmelCase : int = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' ) lowerCAmelCase : List[Any] = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 lowerCAmelCase : List[str] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off lowerCAmelCase : Dict = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, 
-2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) @unittest.skip( '''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' ) @slow def lowerCamelCase__ ( self : List[str] ): lowerCAmelCase : Optional[Any] = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8] lowerCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' ) lowerCAmelCase : Any = model(torch.tensor(UpperCamelCase_ ) ) lowerCAmelCase : Optional[Any] = torch.tensor( [[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1E-2 , rtol=1E-2 ) # fmt: off lowerCAmelCase : Any = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] ) # fmt: on torch.testing.assert_close(out[0, 0, :3_0] , UpperCamelCase_ , atol=1E-5 , rtol=1E-5 ) @unittest.skip('''Model is curently gated''' ) @slow def lowerCamelCase__ ( self : List[Any] ): lowerCAmelCase : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi''' lowerCAmelCase : int = '''Simply put, the theory of relativity states that ''' lowerCAmelCase : str = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' ) lowerCAmelCase : Optional[int] = tokenizer.encode(UpperCamelCase_ , return_tensors='''pt''' ) lowerCAmelCase : List[Any] = LlamaForCausalLM.from_pretrained( '''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=UpperCamelCase_ ) # greedy generation outputs lowerCAmelCase : int = model.generate(UpperCamelCase_ , max_new_tokens=6_4 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ ) lowerCAmelCase : int = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
637
0
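The scaling test in the row above hands LlamaModel a hand-built {'type': scaling_type, 'factor': 10.0} dict; the same knob is exposed as LlamaConfig.rope_scaling. A minimal sketch with tiny, illustrative sizes (an assumption for demonstration, not the configuration the tests use):

# Minimal sketch of the RoPE-scaling knob exercised by the test above.
# All sizes are illustrative; `rope_scaling` accepts
# {"type": "linear" | "dynamic", "factor": <float greater than 1>}
# in recent transformers releases.
import torch
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=100,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    max_position_embeddings=64,
    rope_scaling={"type": "linear", "factor": 2.0},
)
model = LlamaModel(config).eval()

# RoPE has no learned position table, so a sequence longer than
# max_position_embeddings still runs; scaling stretches the rotary angles.
input_ids = torch.randint(0, config.vocab_size, (1, 96))
with torch.no_grad():
    hidden = model(input_ids).last_hidden_state
print(hidden.shape)  # torch.Size([1, 96, 32])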
"""simple docstring""" from math import factorial, radians def _snake_case ( _snake_case : float , _snake_case : int = 18 , _snake_case : int = 10 ): lowerCAmelCase : int = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0) # Converting from degrees to radians lowerCAmelCase : Optional[int] = radians(__lowerCAmelCase ) lowerCAmelCase : int = angle_in_radians lowerCAmelCase : Dict = 3 lowerCAmelCase : List[str] = -1 for _ in range(__lowerCAmelCase ): result += (b * (angle_in_radians**a)) / factorial(__lowerCAmelCase ) lowerCAmelCase : Optional[Any] = -b # One positive term and the next will be negative and so on... a += 2 # Increased by 2 for every term. return round(__lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": __import__('''doctest''').testmod()
709
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def _snake_case ( _snake_case : Tuple , _snake_case : Union[str, Any]=10 ): lowerCAmelCase : Dict = [] for _ in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def _snake_case ( _snake_case : Optional[int] , _snake_case : int=10 ): lowerCAmelCase : Optional[int] = [] for step in range(_snake_case ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase : List[str] = os.path.join(_snake_case , '''schedule.bin''' ) torch.save(scheduler.state_dict() , _snake_case ) lowerCAmelCase : List[Any] = torch.load(_snake_case ) scheduler.load_state_dict(_snake_case ) return lrs @require_torch class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Any ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ ) def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Any = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : List[str] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : List[Any] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Union[str, Any] = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(1_0_0 ): lowerCAmelCase : Union[str, Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = torch.tensor([0.4, 0.2, -0.5] ) lowerCAmelCase : Optional[int] = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowerCAmelCase : Any = Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase_ , weight_decay=0.0 , relative_step=UpperCamelCase_ , scale_parameter=UpperCamelCase_ , warmup_init=UpperCamelCase_ , ) for _ in range(1_0_0_0 ): lowerCAmelCase : List[Any] = criterion(UpperCamelCase_ , UpperCamelCase_ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class snake_case_( unittest.TestCase ): __UpperCamelCase = nn.Linear(50 , 50 ) if is_torch_available() else None __UpperCamelCase = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None __UpperCamelCase = 10 def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any]=None ): self.assertEqual(len(UpperCamelCase_ ) , len(UpperCamelCase_ ) ) for a, b in zip(UpperCamelCase_ , UpperCamelCase_ ): self.assertAlmostEqual(UpperCamelCase_ , UpperCamelCase_ , delta=UpperCamelCase_ , msg=UpperCamelCase_ ) def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Tuple = {'''num_warmup_steps''': 2, '''num_training_steps''': 1_0} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowerCAmelCase : Optional[Any] = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'''num_warmup_steps''': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, '''num_cycles''': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'''num_warmup_steps''': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): lowerCAmelCase, lowerCAmelCase : Union[str, Any] = data lowerCAmelCase : List[Any] = scheduler_func(self.optimizer , **UpperCamelCase_ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowerCAmelCase : str = unwrap_schedule(UpperCamelCase_ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase_ , UpperCamelCase_ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) lowerCAmelCase : Optional[int] = scheduler_func(self.optimizer , **UpperCamelCase_ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase_ ) # wrap to test picklability of the schedule lowerCAmelCase : List[Any] = unwrap_and_save_reload_schedule(UpperCamelCase_ , self.num_steps ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ , msg=F'''failed for {scheduler_func} in save and reload''' ) class snake_case_: def __init__( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = fn def __call__( self : Union[str, Any] , *UpperCamelCase_ : Optional[Any] , **UpperCamelCase_ : List[Any] ): return self.fn(*UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Optional[int] ): lowerCAmelCase : Union[str, Any] = list(map(self , scheduler.lr_lambdas ) )
637
0
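The trigonometry helper in this row evaluates the Maclaurin expansion sin(x) = x - x^3/3! + x^5/5! - ... after wrapping the input into [0, 360) degrees. A quick sanity check against the stdlib (angles picked arbitrarily; assumes the repaired sin(angle_in_degrees, accuracy, rounded_values_count) signature above):

# Compare the series above with math.sin; the module only imports
# factorial and radians, so there is no name clash with our sin().
import math

for angle in (0, 30, 90, 270, 540):
    approx = sin(angle)  # the Taylor-series version defined above
    exact = round(math.sin(math.radians(angle)), 10)
    print(f"{angle:>4} deg: series={approx}  math.sin={exact}")
# With 18 series terms the two columns should agree to all 10 printed places.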
"""simple docstring""" from __future__ import annotations snake_case__ : Tuple = 1.6021e-19 # units = C def _snake_case ( _snake_case : str , _snake_case : List[Any] , _snake_case : int , ): if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
710
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool class snake_case_( a__ ): __UpperCamelCase = '''philschmid/bart-large-cnn-samsum''' __UpperCamelCase = ( '''This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, ''' '''and returns a summary of the text.''' ) __UpperCamelCase = '''summarizer''' __UpperCamelCase = AutoTokenizer __UpperCamelCase = AutoModelForSeqaSeqLM __UpperCamelCase = ['''text'''] __UpperCamelCase = ['''text'''] def lowerCamelCase__ ( self : Dict , UpperCamelCase_ : int ): return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' , truncation=UpperCamelCase_ ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase_ : str ): return self.model.generate(**UpperCamelCase_ )[0] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Tuple ): return self.pre_processor.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
637
0
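The carrier-concentration helper in this row solves the conductivity relation sigma = q * n * mu (charge q, electron concentration n, mobility mu) for whichever quantity is passed as 0. A usage sketch with invented numbers:

# Solve sigma = q * n * mu for the electron concentration n.
# The conductivity and mobility values below are made up for illustration;
# any consistent unit system works, since the function only rearranges the formula.
quantity, value = carrier_concentration(conductivity=25.0, electron_conc=0, mobility=1200.0)
print(quantity, value)  # electron_conc  25 / (1200 * 1.6021e-19) ~= 1.3e17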
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _snake_case ( _snake_case : int , _snake_case : Union[str, Any] ) -> Tuple: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _snake_case ( _snake_case : Optional[int] , _snake_case : int , _snake_case : Optional[int] ) -> Union[str, Any]: lowerCAmelCase : Dict = tmp_path / "cache" lowerCAmelCase : List[str] = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def _snake_case ( _snake_case : int , _snake_case : Dict , _snake_case : Optional[Any] ) -> str: lowerCAmelCase : int = tmp_path / "cache" lowerCAmelCase : Tuple = {"text": "string"} lowerCAmelCase : Optional[Any] = features.copy() if features else default_expected_features lowerCAmelCase : List[Any] = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase : Any = TextDatasetReader(_lowerCAmelCase , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _snake_case ( _snake_case : Dict , _snake_case : str , _snake_case : int ) -> List[Any]: lowerCAmelCase : Union[str, Any] = tmp_path / "cache" lowerCAmelCase : List[Any] = {"text": "string"} lowerCAmelCase : Dict = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase , split=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def _snake_case ( _snake_case : Dict , _snake_case : Union[str, Any] , _snake_case : List[Any] ) -> Dict: if issubclass(_lowerCAmelCase , _lowerCAmelCase ): lowerCAmelCase : Dict = text_path elif issubclass(_lowerCAmelCase , _lowerCAmelCase ): lowerCAmelCase : int = [text_path] lowerCAmelCase : List[Any] = tmp_path / "cache" lowerCAmelCase : Optional[int] = {"text": "string"} lowerCAmelCase : Union[str, Any] = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_dataset(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( _snake_case : Dict , _snake_case : int , _snake_case : List[Any]=("train",) ) -> Dict: assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) for split in splits: lowerCAmelCase : Dict = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def _snake_case ( _snake_case : Tuple , 
_snake_case : Optional[Any] , _snake_case : Any ) -> Dict: lowerCAmelCase : Optional[int] = tmp_path / "cache" lowerCAmelCase : Dict = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase : Dict = TextDatasetReader({'''train''': text_path} , cache_dir=_lowerCAmelCase , keep_in_memory=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''text''': '''string'''}, {'''text''': '''int32'''}, {'''text''': '''float32'''}, ] , ) def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] , _snake_case : int ) -> str: lowerCAmelCase : List[Any] = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" lowerCAmelCase : Optional[int] = {"text": "string"} lowerCAmelCase : Optional[int] = features.copy() if features else default_expected_features lowerCAmelCase : str = ( Features({feature: Value(_lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase : Tuple = TextDatasetReader({'''train''': text_path} , features=_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def _snake_case ( _snake_case : List[Any] , _snake_case : Any , _snake_case : Union[str, Any] ) -> int: if split: lowerCAmelCase : Tuple = {split: text_path} else: lowerCAmelCase : List[str] = "train" lowerCAmelCase : List[str] = {"train": text_path, "test": text_path} lowerCAmelCase : str = tmp_path / "cache" lowerCAmelCase : int = {"text": "string"} lowerCAmelCase : str = TextDatasetReader(_lowerCAmelCase , cache_dir=_lowerCAmelCase ).read() _check_text_datasetdict(_lowerCAmelCase , _lowerCAmelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
711
"""simple docstring""" snake_case__ : List[Any] = '''Tobias Carryer''' from time import time class snake_case_: def __init__( self : Optional[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=int(time() ) ): # noqa: B008 lowerCAmelCase : str = multiplier lowerCAmelCase : Optional[int] = increment lowerCAmelCase : Optional[Any] = modulo lowerCAmelCase : Optional[Any] = seed def lowerCamelCase__ ( self : Union[str, Any] ): lowerCAmelCase : Optional[int] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. snake_case__ : int = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31) while True: print(lcg.next_number())
637
0
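The class in this row implements the linear congruential recurrence seed_{n+1} = (multiplier * seed_n + increment) mod modulo; its __main__ block uses the classic constants a = 1664525, c = 1013904223, m = 2 << 31 (that is, 2**32). A short reproducibility sketch:

# The stream is fully determined by (multiplier, increment, modulo, seed).
lcg_a = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)
lcg_b = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=42)

first_three = [lcg_a.next_number() for _ in range(3)]
assert first_three == [lcg_b.next_number() for _ in range(3)]  # same seed, same sequence
print(first_three)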