Dataset schema (one row per example):

| Column | Type | Observed range |
| --- | --- | --- |
| `code` | string | lengths 86 to 54.5k characters |
| `code_codestyle` | int64 | 0 to 371 |
| `style_context` | string | lengths 87 to 49.2k characters |
| `style_context_codestyle` | int64 | 0 to 349 |
| `label` | int64 | 0 to 1 |
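A minimal sketch of inspecting one example with this schema, assuming the dump comes from a dataset hosted for the Hugging Face `datasets` library; the repository id below is a hypothetical placeholder, since the preview does not name it:

```python
# Sketch: load and inspect one example of the schema above.
# Assumption: the dataset is loadable with the Hugging Face `datasets`
# library; "user/code-style-pairs" is a hypothetical placeholder id.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical id

example = ds[0]
print(example["code"][:200])                # first code string (86 to 54.5k chars)
print(example["code_codestyle"])            # int64 in [0, 371]
print(example["style_context"][:200])       # second code string (87 to 49.2k chars)
print(example["style_context_codestyle"])   # int64 in [0, 349]
print(example["label"])                     # int64 in [0, 1]
```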
Row 1

`code`:

```python
import argparse

import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor

from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
    __lowerCamelCase : List[Any] = SwinaSRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        __lowerCamelCase : Optional[int] = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        __lowerCamelCase : int = 4
        __lowerCamelCase : List[str] = 4_8
        __lowerCamelCase : Any = 'pixelshuffle_aux'
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        __lowerCamelCase : str = [6, 6, 6, 6]
        __lowerCamelCase : Optional[int] = 6_0
        __lowerCamelCase : Union[str, Any] = [6, 6, 6, 6]
        __lowerCamelCase : List[str] = 'pixelshuffledirect'
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        __lowerCamelCase : Optional[Any] = 4
        __lowerCamelCase : List[Any] = 'nearest+conv'
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        __lowerCamelCase : Tuple = 1
        __lowerCamelCase : Union[str, Any] = 1
        __lowerCamelCase : List[str] = 1_2_6
        __lowerCamelCase : List[Any] = 7
        __lowerCamelCase : Tuple = 255.0
        __lowerCamelCase : Any = ''

    return config


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
    if "patch_embed.proj" in name and "layers" not in name:
        __lowerCamelCase : Dict = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        __lowerCamelCase : Tuple = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
    if "layers" in name:
        __lowerCamelCase : str = name.replace('layers' , 'encoder.stages' )
    if "residual_group.blocks" in name:
        __lowerCamelCase : Tuple = name.replace('residual_group.blocks' , 'layers' )
    if "attn.proj" in name:
        __lowerCamelCase : Tuple = name.replace('attn.proj' , 'attention.output.dense' )
    if "attn" in name:
        __lowerCamelCase : str = name.replace('attn' , 'attention.self' )
    if "norm1" in name:
        __lowerCamelCase : str = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        __lowerCamelCase : Dict = name.replace('norm2' , 'layernorm_after' )
    if "mlp.fc1" in name:
        __lowerCamelCase : Optional[int] = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        __lowerCamelCase : Optional[Any] = name.replace('mlp.fc2' , 'output.dense' )
    if "q_bias" in name:
        __lowerCamelCase : Any = name.replace('q_bias' , 'query.bias' )
    if "k_bias" in name:
        __lowerCamelCase : Optional[Any] = name.replace('k_bias' , 'key.bias' )
    if "v_bias" in name:
        __lowerCamelCase : int = name.replace('v_bias' , 'value.bias' )
    if "cpb_mlp" in name:
        __lowerCamelCase : List[Any] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
    if "patch_embed.proj" in name:
        __lowerCamelCase : str = name.replace('patch_embed.proj' , 'patch_embed.projection' )
    if name == "norm.weight":
        __lowerCamelCase : Dict = 'layernorm.weight'
    if name == "norm.bias":
        __lowerCamelCase : Dict = 'layernorm.bias'
    if "conv_first" in name:
        __lowerCamelCase : Optional[int] = name.replace('conv_first' , 'first_convolution' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            __lowerCamelCase : Any = name.replace('conv_last' , 'final_convolution' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                __lowerCamelCase : List[Any] = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
            if "upsample.0" in name:
                __lowerCamelCase : Dict = name.replace('upsample.0' , 'upsample.convolution_0' )
            if "upsample.2" in name:
                __lowerCamelCase : List[Any] = name.replace('upsample.2' , 'upsample.convolution_1' )
            __lowerCamelCase : Union[str, Any] = 'upsample.' + name
        elif config.upsampler == "pixelshuffledirect":
            __lowerCamelCase : int = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
            __lowerCamelCase : List[str] = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
        else:
            pass
    else:
        __lowerCamelCase : List[Any] = 'swin2sr.' + name

    return name


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
    for key in orig_state_dict.copy().keys():
        __lowerCamelCase : int = orig_state_dict.pop(lowerCamelCase__ )

        if "qkv" in key:
            __lowerCamelCase : Dict = key.split('.' )
            __lowerCamelCase : Dict = int(key_split[1] )
            __lowerCamelCase : Optional[Any] = int(key_split[4] )
            __lowerCamelCase : List[str] = config.embed_dim

            if "weight" in key:
                __lowerCamelCase : Optional[int] = val[:dim, :]
                __lowerCamelCase : Union[str, Any] = val[dim : dim * 2, :]
                __lowerCamelCase : str = val[-dim:, :]
            else:
                __lowerCamelCase : List[str] = val[:dim]
                __lowerCamelCase : Union[str, Any] = val[dim : dim * 2]
                __lowerCamelCase : Union[str, Any] = val[-dim:]
            pass
        else:
            __lowerCamelCase : Dict = val

    return orig_state_dict


def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
    __lowerCamelCase : Optional[int] = get_config(lowerCamelCase__ )
    __lowerCamelCase : List[Any] = SwinaSRForImageSuperResolution(lowerCamelCase__ )
    model.eval()

    __lowerCamelCase : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location='cpu' )
    __lowerCamelCase : Tuple = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
    __lowerCamelCase , __lowerCamelCase : Optional[Any] = model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )

    if len(lowerCamelCase__ ) > 0:
        raise ValueError('Missing keys when converting: {}'.format(lowerCamelCase__ ) )
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(F"Unexpected key {key} in state_dict" )

    # verify values
    __lowerCamelCase : str = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
    __lowerCamelCase : int = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw ).convert('RGB' )
    __lowerCamelCase : Union[str, Any] = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    __lowerCamelCase : str = 1_2_6 if 'Jpeg' in checkpoint_url else 2_5_6
    __lowerCamelCase : List[str] = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    __lowerCamelCase : Union[str, Any] = transforms(lowerCamelCase__ ).unsqueeze(0 )

    if config.num_channels == 1:
        __lowerCamelCase : List[str] = pixel_values[:, 0, :, :].unsqueeze(1 )

    __lowerCamelCase : Tuple = model(lowerCamelCase__ )

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        __lowerCamelCase : Tuple = torch.Size([1, 3, 5_1_2, 5_1_2] )
        __lowerCamelCase : Optional[Any] = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        __lowerCamelCase : List[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        __lowerCamelCase : Dict = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        __lowerCamelCase : Optional[int] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        __lowerCamelCase : Dict = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        __lowerCamelCase : Optional[Any] = torch.Size([1, 3, 5_1_2, 5_1_2] )
        __lowerCamelCase : Union[str, Any] = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        __lowerCamelCase : Optional[Any] = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
        __lowerCamelCase : List[str] = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCamelCase__ , atol=1e-3 )
    print('Looks ok!' )

    __lowerCamelCase : Optional[Any] = {
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
            'swin2SR-classical-sr-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
            'swin2SR-classical-sr-x4-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
            'swin2SR-compressed-sr-x4-48'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
            'swin2SR-lightweight-x2-64'
        ),
        'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
            'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
        ),
    }
    __lowerCamelCase : str = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(lowerCamelCase__ )
        print(F"Saving image processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(lowerCamelCase__ )

    if push_to_hub:
        model.push_to_hub(F"caidas/{model_name}" )
        processor.push_to_hub(F"caidas/{model_name}" )


if __name__ == "__main__":
    a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
        type=str,
        help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")

    a = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
```

`code_codestyle`: 73
`style_context`:

```python
import math
import random


def lowerCAmelCase_ ( __A, __A = False ) -> float:
    '''simple docstring'''
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))


# Initial Value
UpperCamelCase__ = 0.0_2


def lowerCAmelCase_ ( __A, __A ) -> float:
    '''simple docstring'''
    UpperCAmelCase__ = float(2 * (random.randint(1, 100 )) - 1 )

    for _ in range(__A ):
        # Forward propagation
        UpperCAmelCase__ = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        UpperCAmelCase__ = (expected / 100) - layer_a
        # Error delta
        UpperCAmelCase__ = layer_1_error * sigmoid_function(__A, __A )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_a * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    UpperCamelCase__ = int(input('Expected value: '))
    UpperCamelCase__ = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
```

`style_context_codestyle`: 65

`label`: 0
Row 2

`code`:

```python
'''simple docstring'''
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class __UpperCamelCase ( unittest.TestCase ):
    def __init__( self, lowerCAmelCase, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = 32, lowerCAmelCase = True, lowerCAmelCase = 1 / 255, lowerCAmelCase = True, lowerCAmelCase = True, lowerCAmelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], lowerCAmelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], lowerCAmelCase = True, lowerCAmelCase=7, lowerCAmelCase=30, lowerCAmelCase=400, lowerCAmelCase=3, ):
        """simple docstring"""
        lowerCamelCase_ =parent
        lowerCamelCase_ =do_resize
        lowerCamelCase_ =size if size is not None else {'''shortest_edge''': 288}
        lowerCamelCase_ =size_divisor
        lowerCamelCase_ =do_rescale
        lowerCamelCase_ =rescale_factor
        lowerCamelCase_ =do_normalize
        lowerCamelCase_ =do_center_crop
        lowerCamelCase_ =image_mean
        lowerCamelCase_ =image_std
        lowerCamelCase_ =do_pad
        lowerCamelCase_ =batch_size
        lowerCamelCase_ =num_channels
        lowerCamelCase_ =min_resolution
        lowerCamelCase_ =max_resolution

    def lowercase__ ( self ):
        """simple docstring"""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False ):
        """simple docstring"""
        if not batched:
            lowerCamelCase_ =self.size['''shortest_edge''']
            lowerCamelCase_ =image_inputs[0]
            if isinstance(lowerCAmelCase, Image.Image ):
                lowerCamelCase_, lowerCamelCase_ =image.size
            else:
                lowerCamelCase_, lowerCamelCase_ =image.shape[1], image.shape[2]
            lowerCamelCase_ =size / min(lowerCAmelCase, lowerCAmelCase )
            if h < w:
                lowerCamelCase_, lowerCamelCase_ =size, scale * w
            else:
                lowerCamelCase_, lowerCamelCase_ =scale * h, size

            lowerCamelCase_ =int((1_333 / 800) * size )
            if max(lowerCAmelCase, lowerCAmelCase ) > max_size:
                lowerCamelCase_ =max_size / max(lowerCAmelCase, lowerCAmelCase )
                lowerCamelCase_ =newh * scale
                lowerCamelCase_ =neww * scale
                lowerCamelCase_, lowerCamelCase_ =int(newh + 0.5 ), int(neww + 0.5 )

            lowerCamelCase_, lowerCamelCase_ =(
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            lowerCamelCase_ =[]
            for image in image_inputs:
                lowerCamelCase_, lowerCamelCase_ =self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[0] )[0]
            lowerCamelCase_ =max(lowerCAmelCase, key=lambda lowerCAmelCase : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class __UpperCamelCase ( lowerCamelCase__ , unittest.TestCase ):
    lowercase : Optional[Any] =BridgeTowerImageProcessor if is_vision_available() else None

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =BridgeTowerImageProcessingTester(self )

    @property
    def lowercase__ ( self ):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase, '''image_mean''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''image_std''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''do_normalize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''do_resize''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''size''' ) )
        self.assertTrue(hasattr(lowerCAmelCase, '''size_divisor''' ) )

    def lowercase__ ( self ):
        """simple docstring"""
        pass

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase, Image.Image )

        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, numpify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase, np.ndarray )

        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def lowercase__ ( self ):
        """simple docstring"""
        lowerCamelCase_ =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowerCamelCase_ =prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase, torchify=lowerCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase, torch.Tensor )

        # Test not batched input
        lowerCamelCase_ =image_processing(image_inputs[0], return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        lowerCamelCase_ =image_processing(lowerCAmelCase, return_tensors='''pt''' ).pixel_values
        lowerCamelCase_, lowerCamelCase_ =self.image_processor_tester.get_expected_values(lowerCAmelCase, batched=lowerCAmelCase )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
```

`code_codestyle`: 369
`style_context`:

```python
'''simple docstring'''
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def a_ ( __snake_case : Optional[int] ) -> List[str]:
    """simple docstring"""
    lowerCamelCase_ =[
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(__snake_case , __snake_case )


def a_ ( __snake_case : List[Any] ) -> int:
    """simple docstring"""
    lowerCamelCase_, lowerCamelCase_ =emb.weight.shape
    lowerCamelCase_ =nn.Linear(__snake_case , __snake_case , bias=__snake_case )
    lowerCamelCase_ =emb.weight.data
    return lin_layer


def a_ ( __snake_case : Union[str, Any] , __snake_case : Tuple=None ) -> Dict:
    """simple docstring"""
    lowerCamelCase_ ={}
    for old_key in state_dict.keys():
        lowerCamelCase_ =old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                lowerCamelCase_ =key.replace('''moe_layer.experts.0''' , F'''ffn.experts.expert_{expert_idx}''' )
            else:
                lowerCamelCase_ =key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
        if "gate" in key:
            lowerCamelCase_ =key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
        if "fc2" and "experts" not in key:
            lowerCamelCase_ =key.replace('''.fc2.''' , '''.ffn.fc2.''' )
        if "fc1" and "experts" not in key:
            lowerCamelCase_ =key.replace('''.fc1.''' , '''.ffn.fc1.''' )
        if ".encoder_attn." in key:
            lowerCamelCase_ =key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
        if "encoder_attn_layer_norm" in key:
            lowerCamelCase_ =key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
        if "final_layer_norm" in key:
            lowerCamelCase_ =key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
        lowerCamelCase_ =state_dict[old_key]
    return new_dict


def a_ ( __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str = WEIGHTS_NAME ) -> Dict:
    """simple docstring"""
    lowerCamelCase_ =[]
    lowerCamelCase_ =0
    os.makedirs(__snake_case , exist_ok=__snake_case )

    for expert in range(__snake_case ):
        lowerCamelCase_ =switch_checkpoint_path + F'''-rank-{expert}.pt'''
        if os.path.isfile(__snake_case ):
            lowerCamelCase_ =torch.load(__snake_case )['''model''']
            remove_ignore_keys_(__snake_case )
            lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
            lowerCamelCase_ =os.path.join(
                __snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
            torch.save(__snake_case , __snake_case )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(__snake_case )[0]].dtype )

    # Add the last block
    lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{len(__snake_case )+1:05d}-of-???.bin''' ) )
    lowerCamelCase_ =torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
    remove_ignore_keys_(__snake_case )
    lowerCamelCase_ =rename_fairseq_keys(__snake_case , __snake_case )
    lowerCamelCase_ =shared_weights['''decoder.embed_tokens.weight''']
    sharded_state_dicts.append(shared_weights.keys() )

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(__snake_case ) == 1:
        lowerCamelCase_ =os.path.join(__snake_case , __snake_case )
        torch.save(__snake_case , __snake_case )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(__snake_case , __snake_case )

    # Otherwise, let's build the index
    lowerCamelCase_ ={}
    for idx, shard in enumerate(__snake_case ):
        lowerCamelCase_ =weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-{len(__snake_case ):05d}.bin''' )
        lowerCamelCase_ =os.path.join(__snake_case , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
        os.rename(__snake_case , os.path.join(__snake_case , __snake_case ) )
        for key in shard:
            lowerCamelCase_ =shard_file

    # Add the metadata
    lowerCamelCase_ ={'''total_size''': total_size}
    lowerCamelCase_ ={'''metadata''': metadata, '''weight_map''': weight_map}

    with open(os.path.join(__snake_case , __snake_case ) , '''w''' , encoding='''utf-8''' ) as f:
        lowerCamelCase_ =json.dumps(__snake_case , indent=2 , sort_keys=__snake_case ) + '''\n'''
        f.write(__snake_case )

    return metadata, index


if __name__ == "__main__":
    a_ : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--nllb_moe_checkpoint_path""",
        default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
        type=str,
        required=False,
        help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
    )
    parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
    parser.add_argument(
        """--pytorch_dump_folder_path""",
        default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
        type=str,
        required=False,
        help="""Path to the output pytorch model.""",
    )
    a_ : Tuple = parser.parse_args()
    a_ , a_ : int = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        1_28,
        args.dtype,
    )

    a_ : Tuple = NllbMoeConfig.from_pretrained(
        """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_28
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    a_ : Any = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("""Done""")
    model.save_pretrained(args.pytorch_dump_folder_path)
```

`style_context_codestyle`: 6

`label`: 0
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation snake_case__ : int = logging.get_logger(__name__) snake_case__ : Optional[int] = { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } snake_case__ : str = { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } snake_case__ : Optional[Any] = {'''facebook/blenderbot-3B''': 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _snake_case ( ): lowerCAmelCase : int = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) lowerCAmelCase : Any = bs[:] lowerCAmelCase : Tuple = 0 for b in range(2**8 ): if b not in bs: bs.append(A_ ) cs.append(2**8 + n ) n += 1 lowerCAmelCase : Optional[Any] = [chr(A_ ) for n in cs] return dict(zip(A_ , A_ ) ) def _snake_case ( _snake_case : Any ): lowerCAmelCase : List[Any] = set() lowerCAmelCase : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase : int = char return pairs class snake_case_( _a ): __UpperCamelCase = VOCAB_FILES_NAMES __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase = ["""input_ids""", """attention_mask"""] def __init__( self : Dict , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Dict="replace" , UpperCamelCase_ : Tuple="<s>" , UpperCamelCase_ : Any="</s>" , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : str="<s>" , UpperCamelCase_ : Optional[Any]="<unk>" , UpperCamelCase_ : Optional[int]="<pad>" , UpperCamelCase_ : Optional[Any]="<mask>" , UpperCamelCase_ : Any=False , **UpperCamelCase_ : List[Any] , ): lowerCAmelCase : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token lowerCAmelCase : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token lowerCAmelCase : Optional[int] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token lowerCAmelCase : Optional[int] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token lowerCAmelCase : Optional[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token lowerCAmelCase : Tuple = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCAmelCase : Dict = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , ) with open(__UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle: lowerCAmelCase : str = json.load(__UpperCAmelCase ) lowerCAmelCase : str = {v: k for k, v in self.encoder.items()} lowerCAmelCase : str = errors # how to handle errors in decoding lowerCAmelCase : str = bytes_to_unicode() lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()} with open(__UpperCAmelCase , encoding='''utf-8''' ) as merges_handle: lowerCAmelCase : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1] lowerCAmelCase : List[str] = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase : Optional[Any] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) lowerCAmelCase : Any = {} lowerCAmelCase : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase : str = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def lowerCamelCase__ ( self : Tuple ): return len(self.encoder ) def lowerCamelCase__ ( self : Optional[Any] ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : int , UpperCamelCase_ : str ): if token in self.cache: return self.cache[token] lowerCAmelCase : str = tuple(__UpperCAmelCase ) lowerCAmelCase : Dict = get_pairs(__UpperCAmelCase ) if not pairs: return token while True: lowerCAmelCase : Optional[int] = min(__UpperCAmelCase , key=lambda UpperCamelCase_ : self.bpe_ranks.get(__UpperCAmelCase , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase : str = bigram lowerCAmelCase : Optional[int] = [] lowerCAmelCase : List[str] = 0 while i < len(__UpperCAmelCase ): try: lowerCAmelCase : List[str] = word.index(__UpperCAmelCase , __UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase : int = j if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase : int = tuple(__UpperCAmelCase ) lowerCAmelCase : List[Any] = new_word if len(__UpperCAmelCase ) == 1: break else: lowerCAmelCase : Dict = get_pairs(__UpperCAmelCase ) lowerCAmelCase : Dict = " ".join(__UpperCAmelCase ) lowerCAmelCase : str = word return word def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : Optional[Any] ): lowerCAmelCase : Optional[Any] = [] for token in re.findall(self.pat , __UpperCAmelCase ): lowerCAmelCase : Dict = "".join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(''' ''' ) ) return bpe_tokens def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[Any] ): return 
self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : Union[str, Any] ): return self.decoder.get(__UpperCAmelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase_ : Any ): lowerCAmelCase : Tuple = "".join(__UpperCAmelCase ) lowerCAmelCase : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None ): if not os.path.isdir(__UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return lowerCAmelCase : Dict = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) lowerCAmelCase : int = os.path.join( __UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + '''\n''' ) lowerCAmelCase : str = 0 with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) lowerCAmelCase : List[str] = token_index writer.write(''' '''.join(__UpperCAmelCase ) + '''\n''' ) index += 1 return vocab_file, merge_file def lowerCamelCase__ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCAmelCase )) + [1] return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1] def lowerCamelCase__ ( self : Any , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): lowerCAmelCase : Union[str, Any] = [self.sep_token_id] lowerCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Optional[Any]=False , **UpperCamelCase_ : List[Any] ): lowerCAmelCase : Any = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()): lowerCAmelCase : List[Any] = " " + text return (text, kwargs) def lowerCamelCase__ ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : "Conversation" ): lowerCAmelCase : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. 
inputs.append(__UpperCAmelCase ) lowerCAmelCase : Tuple = " ".join(__UpperCAmelCase ) lowerCAmelCase : Optional[Any] = self.encode(__UpperCAmelCase ) if len(__UpperCAmelCase ) > self.model_max_length: lowerCAmelCase : int = input_ids[-self.model_max_length :] logger.warning(F'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
60
"""simple docstring""" def lowercase ( A_ )-> str: '''simple docstring''' if isinstance(A_ , A_ ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(A_ , A_ ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" a : Optional[Any] = False if num < 0: a : Tuple = True a : str = -num a : list[int] = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(A_ ) for e in binary ) return "0b" + "".join(str(A_ ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
40
0
Row 4

`code`:

```python
import math
import sys

import cva
import numpy as np


def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE = math.sqrt(__lowerCAmelCase )
    SCREAMING_SNAKE_CASE = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )


def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> np.ndarray:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , __lowerCAmelCase ):
        for j in range(0 , __lowerCAmelCase ):
            SCREAMING_SNAKE_CASE = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(__lowerCAmelCase , __lowerCAmelCase )


def __lowercase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> np.ndarray:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE = np.zeros(img.shape )
    SCREAMING_SNAKE_CASE = get_gauss_kernel(__lowerCAmelCase , __lowerCAmelCase )
    SCREAMING_SNAKE_CASE = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            SCREAMING_SNAKE_CASE = get_slice(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
            SCREAMING_SNAKE_CASE = img_s - img_s[kernel_size // 2, kernel_size // 2]
            SCREAMING_SNAKE_CASE = vec_gaussian(__lowerCAmelCase , __lowerCAmelCase )
            SCREAMING_SNAKE_CASE = np.multiply(__lowerCAmelCase , __lowerCAmelCase )
            SCREAMING_SNAKE_CASE = np.multiply(__lowerCAmelCase , __lowerCAmelCase )
            SCREAMING_SNAKE_CASE = np.sum(__lowerCAmelCase ) / np.sum(__lowerCAmelCase )
            SCREAMING_SNAKE_CASE = val
    return imga


def __lowercase ( _SCREAMING_SNAKE_CASE ) -> tuple:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE = args[1] if args[1:] else '''../image_data/lena.jpg'''
    SCREAMING_SNAKE_CASE = float(args[2] ) if args[2:] else 1.0
    SCREAMING_SNAKE_CASE = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        SCREAMING_SNAKE_CASE = int(args[4] )
        SCREAMING_SNAKE_CASE = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        SCREAMING_SNAKE_CASE = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = parse_args(sys.argv)
    SCREAMING_SNAKE_CASE_ = cva.imread(filename, 0)
    cva.imshow("""input image""", img)

    SCREAMING_SNAKE_CASE_ = img / 2_5_5
    SCREAMING_SNAKE_CASE_ = out.astype("""float32""")

    SCREAMING_SNAKE_CASE_ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    SCREAMING_SNAKE_CASE_ = out * 2_5_5
    SCREAMING_SNAKE_CASE_ = np.uinta(out)
    cva.imshow("""output image""", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
```

`code_codestyle`: 350
`style_context`:

```python
import inspect
import unittest

from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax
    import jax.numpy as jnp

    from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class UpperCamelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self : Optional[int] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : Optional[Any]=3 ,lowerCamelCase__ : List[str]=32 ,lowerCamelCase__ : List[Any]=3 ,lowerCamelCase__ : str=10 ,lowerCamelCase__ : Any=[10, 20, 30, 40] ,lowerCamelCase__ : Optional[Any]=[1, 1, 2, 1] ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Tuple="relu" ,lowerCamelCase__ : Dict=3 ,lowerCamelCase__ : Optional[int]=None ,) -> Optional[int]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = parent
        SCREAMING_SNAKE_CASE = batch_size
        SCREAMING_SNAKE_CASE = image_size
        SCREAMING_SNAKE_CASE = num_channels
        SCREAMING_SNAKE_CASE = embeddings_size
        SCREAMING_SNAKE_CASE = hidden_sizes
        SCREAMING_SNAKE_CASE = depths
        SCREAMING_SNAKE_CASE = is_training
        SCREAMING_SNAKE_CASE = use_labels
        SCREAMING_SNAKE_CASE = hidden_act
        SCREAMING_SNAKE_CASE = num_labels
        SCREAMING_SNAKE_CASE = scope
        SCREAMING_SNAKE_CASE = len(lowerCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Tuple:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE = self.get_config()
        return config, pixel_values

    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[int]:
        '''simple docstring'''
        return RegNetConfig(
            num_channels=self.num_channels ,
            embeddings_size=self.embeddings_size ,
            hidden_sizes=self.hidden_sizes ,
            depths=self.depths ,
            hidden_act=self.hidden_act ,
            num_labels=self.num_labels ,
            image_size=self.image_size ,
        )

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any] ) -> Any:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = FlaxRegNetModel(config=lowerCamelCase__ )
        SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )

        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,
        )

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ) -> Tuple:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.num_labels
        SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification(config=lowerCamelCase__ )
        SCREAMING_SNAKE_CASE = model(lowerCamelCase__ )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )

    def SCREAMING_SNAKE_CASE__ ( self : str ) -> int:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = config_and_inputs
        SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
        return config, inputs_dict


@require_flax
class UpperCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
    '''simple docstring'''

    __snake_case : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    __snake_case : Tuple = False
    __snake_case : int = False
    __snake_case : Tuple = False

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> None:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = FlaxRegNetModelTester(self )
        SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
        '''simple docstring'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def SCREAMING_SNAKE_CASE__ ( self : int ) -> List[str]:
        '''simple docstring'''
        return

    def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> int:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )

    @unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
        '''simple docstring'''
        pass

    @unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
        '''simple docstring'''
        pass

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )
            SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]

            SCREAMING_SNAKE_CASE = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] ,lowerCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> int:
        '''simple docstring'''

        def check_hidden_states_output(lowerCamelCase__ : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int] ):
            SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )
            SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ) )

            SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            SCREAMING_SNAKE_CASE = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase__ ) ,expected_num_stages + 1 )

        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            SCREAMING_SNAKE_CASE = True
            check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            SCREAMING_SNAKE_CASE = True
            check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )

    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[Any]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ )
                SCREAMING_SNAKE_CASE = model_class(lowerCamelCase__ )

                @jax.jit
                def model_jitted(lowerCamelCase__ : Dict ,**lowerCamelCase__ : Optional[Any] ):
                    return model(pixel_values=lowerCamelCase__ ,**lowerCamelCase__ )

                with self.subTest("""JIT Enabled""" ):
                    SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase__ ).to_tuple()

                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase__ ).to_tuple()

                self.assertEqual(len(lowerCamelCase__ ) ,len(lowerCamelCase__ ) )
                for jitted_output, output in zip(lowerCamelCase__ ,lowerCamelCase__ ):
                    self.assertEqual(jitted_output.shape ,output.shape )


def __lowercase ( ) -> Union[str, Any]:
    '''simple docstring'''
    SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
    '''simple docstring'''

    @cached_property
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
        '''simple docstring'''
        return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None

    @slow
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> List[str]:
        '''simple docstring'''
        SCREAMING_SNAKE_CASE = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )

        SCREAMING_SNAKE_CASE = self.default_image_processor
        SCREAMING_SNAKE_CASE = prepare_img()
        SCREAMING_SNAKE_CASE = image_processor(images=lowerCamelCase__ ,return_tensors="""np""" )

        SCREAMING_SNAKE_CASE = model(**lowerCamelCase__ )

        # verify the logits
        SCREAMING_SNAKE_CASE = (1, 1000)
        self.assertEqual(outputs.logits.shape ,lowerCamelCase__ )

        SCREAMING_SNAKE_CASE = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1e-4 ) )
```

`style_context_codestyle`: 193

`label`: 0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Any = dataset UpperCAmelCase_ : Dict = process UpperCAmelCase_ : List[Any] = params def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = self.dataset[i] UpperCAmelCase_ : Tuple = self.process(lowercase_ , **self.params ) return processed class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ): """simple docstring""" UpperCAmelCase_ : Optional[int] = loader UpperCAmelCase_ : Optional[int] = infer UpperCAmelCase_ : List[str] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : str = loader_batch_size # Internal bookkeeping UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Union[str, Any] = None def __len__( self ): """simple docstring""" return len(self.loader ) def __iter__( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = iter(self.loader ) return self def UpperCamelCase__ ( self ): """simple docstring""" if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice UpperCAmelCase_ : str = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) UpperCAmelCase_ : List[Any] = {} for k, element in self._loader_batch_data.items(): if isinstance(lowercase_ , lowercase_ ): # Convert ModelOutput to tuple first UpperCAmelCase_ : List[str] = element.to_tuple() if isinstance(element[0] , torch.Tensor ): UpperCAmelCase_ : int = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase_ : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowercase_ , lowercase_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): UpperCAmelCase_ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase_ : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around UpperCAmelCase_ : int = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase_ : Optional[int] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase_ : str = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
UpperCAmelCase_ : str = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 UpperCAmelCase_ : Union[str, Any] = self._loader_batch_data.__class__(lowercase_ ) self._loader_batch_index += 1 return result def UpperCamelCase__ ( self ): """simple docstring""" if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch UpperCAmelCase_ : Dict = next(self.iterator ) UpperCAmelCase_ : Union[str, Any] = self.infer(lowercase_ , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowercase_ , torch.Tensor ): UpperCAmelCase_ : Union[str, Any] = processed else: UpperCAmelCase_ : str = list(processed.keys() )[0] UpperCAmelCase_ : Optional[Any] = processed[key] if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : str = len(lowercase_ ) else: UpperCAmelCase_ : Union[str, Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase_ : str = observed_batch_size # Setting internal index to unwrap the batch UpperCAmelCase_ : Dict = processed UpperCAmelCase_ : Union[str, Any] = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ): """simple docstring""" super().__init__(lowercase_ , lowercase_ , lowercase_ ) def __iter__( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = iter(self.loader ) UpperCAmelCase_ : Dict = None return self def UpperCamelCase__ ( self ): """simple docstring""" if self.subiterator is None: UpperCAmelCase_ : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item UpperCAmelCase_ : Any = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators UpperCAmelCase_ : Tuple = self.infer(next(self.iterator ) , **self.params ) UpperCAmelCase_ : Dict = next(self.subiterator ) return processed class A_ (lowercase__ ): '''simple docstring''' def __iter__( self ): """simple docstring""" UpperCAmelCase_ : List[Any] = iter(self.loader ) return self def UpperCamelCase__ ( self ): """simple docstring""" # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Optional[Any] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase_ : Union[str, Any] = self.loader_batch_item() UpperCAmelCase_ : Dict = item.pop("is_last" ) accumulator.append(lowercase_ ) if is_last: return accumulator while not is_last: UpperCAmelCase_ : Optional[int] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(lowercase_ , torch.Tensor ): UpperCAmelCase_ : Optional[int] = processed else: UpperCAmelCase_ : Any = list(processed.keys() )[0] UpperCAmelCase_ : str = processed[key] if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase_ : List[Any] = len(lowercase_ ) else: UpperCAmelCase_ : Union[str, Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase_ : Tuple = observed_batch_size UpperCAmelCase_ : List[Any] = processed UpperCAmelCase_ : List[Any] = 0 while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase_ : Dict = self.loader_batch_item() UpperCAmelCase_ : Dict = item.pop("is_last" ) accumulator.append(lowercase_ ) if is_last: return accumulator else: UpperCAmelCase_ : List[Any] = processed UpperCAmelCase_ : Optional[Any] = item.pop("is_last" ) accumulator.append(lowercase_ ) return accumulator class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[str] = dataset UpperCAmelCase_ : Dict = key def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , lowercase_ ): """simple docstring""" return self.dataset[i][self.key] class A_ (lowercase__ ): '''simple docstring''' def __init__( self , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : int = dataset UpperCAmelCase_ : Any = keya UpperCAmelCase_ : str = keya def __len__( self ): """simple docstring""" return len(self.dataset ) def __getitem__( self , lowercase_ ): """simple docstring""" return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
61
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation import warnings from .state import AcceleratorState, GradientState warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""") class A__ : def __init__( self : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : bool = True , _UpperCAmelCase : bool = False ) -> Union[str, Any]: """simple docstring""" __lowercase = scheduler __lowercase = optimizers if isinstance(_UpperCAmelCase , (list, tuple) ) else [optimizers] __lowercase = split_batches __lowercase = step_with_optimizer __lowercase = GradientState() def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]: """simple docstring""" if not self.step_with_optimizer: # No link between scheduler and optimizer -> just step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) return # Otherwise, first make sure the optimizer was stepped. if not self.gradient_state.sync_gradients: if self.gradient_state.adjust_scheduler: self.scheduler._step_count += 1 return for opt in self.optimizers: if opt.step_was_skipped: return if self.split_batches: # Split batches -> the training dataloader batch size is not changed so one step per training step self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do # num_processes steps per training step __lowercase = AcceleratorState().num_processes for _ in range(_UpperCAmelCase ): # Special case when using OneCycle and `drop_last` was not used if hasattr(self.scheduler , 'total_steps' ): if self.scheduler._step_count <= self.scheduler.total_steps: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) else: self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase ) def a__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" return self.scheduler.get_last_lr() def a__ ( self : List[str] ) -> Tuple: """simple docstring""" return self.scheduler.state_dict() def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.scheduler.load_state_dict(_UpperCAmelCase ) def a__ ( self : Dict ) -> int: """simple docstring""" return self.scheduler.get_lr() def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any: """simple docstring""" return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
325
0
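The loader-batch unrolling in the row above reads more clearly in isolation. Below is a minimal sketch of the same accumulator pattern, not the pipeline's actual iterator class; `unroll_batches` and the toy `batches` data are illustrative names, not from the source.

def unroll_batches(batches):
    # Accumulate single items until an "is_last" marker closes one
    # logical input, then emit the accumulated group.
    accumulator = []
    for batch in batches:
        for item in batch:
            is_last = item.pop("is_last")
            accumulator.append(item)
            if is_last:
                yield accumulator
                accumulator = []

batches = [
    [{"score": 0.1, "is_last": False}, {"score": 0.2, "is_last": True}],
    [{"score": 0.3, "is_last": True}],
]
for group in unroll_batches(batches):
    print(group)  # one list of items per logical input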
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : List[str] = CLIPTokenizer lowerCamelCase_ : Dict = CLIPTokenizerFast lowerCamelCase_ : str = True lowerCamelCase_ : str = {} lowerCamelCase_ : List[Any] = False def lowerCamelCase (self ) -> int: '''simple docstring''' super().setUp() # fmt: off snake_case_ : Optional[int] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) snake_case_ : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>'''] snake_case_ : Dict = {'''unk_token''': '''<unk>'''} snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def lowerCamelCase (self , **__magic_name__ ) -> Dict: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def lowerCamelCase (self , **__magic_name__ ) -> Union[str, Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ ) def lowerCamelCase (self , __magic_name__ ) -> Any: '''simple docstring''' snake_case_ : Dict = '''lower newer''' snake_case_ : str = '''lower newer''' return input_text, output_text def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ : Union[str, Any] = '''lower newer''' snake_case_ : List[Any] = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>'''] snake_case_ : List[str] = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) snake_case_ : int = tokens + [tokenizer.unk_token] snake_case_ : Dict = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) @require_ftfy def lowerCamelCase (self ) -> Any: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : Union[str, Any] = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) snake_case_ : str = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ ) snake_case_ : Union[str, Any] = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.''' snake_case_ : Tuple = tokenizer_s.tokenize(__magic_name__ ) snake_case_ : List[Any] = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) # Test that the tokenization is 
identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways snake_case_ : str = '''xa\u0303y''' + ''' ''' + '''x\xe3y''' snake_case_ : Optional[int] = tokenizer_s.tokenize(__magic_name__ ) snake_case_ : str = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) # Test that the tokenization is identical on unicode of space type snake_case_ : Union[str, Any] = [ '''\u0009''', # (horizontal tab, '\t') '''\u000B''', # (vertical tab) '''\u000C''', # (form feed) '''\u0020''', # (space, ' ') '''\u200E''', # (left-to-right mark):w '''\u200F''', # (right-to-left mark) ] for unicode_seq in spaces_unicodes: snake_case_ : Any = tokenizer_s.tokenize(__magic_name__ ) snake_case_ : List[str] = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) # Test that the tokenization is identical on unicode of line break type snake_case_ : Optional[int] = [ '''\u000A''', # (line feed, '\n') '''\r\n''', # (carriage return and line feed, '\r\n') '''\u000D''', # (carriage return, '\r') '''\r''', # (carriage return, '\r') '''\u000D''', # (carriage return, '\r') '''\u2028''', # (line separator) '''\u2029''', # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: snake_case_ : int = tokenizer_s.tokenize(__magic_name__ ) snake_case_ : Any = tokenizer_r.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ : int = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` snake_case_ : List[str] = F'''{text_of_1_token} {text_of_1_token}''' snake_case_ : int = self.rust_tokenizer_class.from_pretrained( __magic_name__ , use_fast=__magic_name__ , ) snake_case_ : Union[str, Any] = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(__magic_name__ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(__magic_name__ ) + 1, len(__magic_name__ ) + 1 + len(__magic_name__ )) , ) snake_case_ : List[str] = F''' {text}''' snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained( __magic_name__ , use_fast=__magic_name__ , ) snake_case_ : int = tokenizer_r(__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__magic_name__ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(__magic_name__ ) + 1, 1 + len(__magic_name__ ) + 1 + len(__magic_name__ )) , ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' with self.assertRaises(__magic_name__ ) as context: self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' ) self.assertTrue( context.exception.args[0].startswith( '''The `backend_tokenizer` provided does not match the expected format.''' ) ) @require_ftfy def lowerCamelCase (self ) -> int: '''simple docstring''' super().test_tokenization_python_rust_equals() def lowerCamelCase (self ) -> List[Any]: 
'''simple docstring''' pass
352
def twos_complement(number: int) -> str: """simple docstring""" if number > 0: raise ValueError('''input must be a negative integer''' ) binary_number_length = len(bin(number )[3:] ) twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:] twos_complement_number = ( ( '''1''' + '''0''' * (binary_number_length - len(twos_complement_number )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
279
0
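The two's-complement construction in the row above can be checked against a few concrete values. The snippet below is a compacted, self-contained re-statement for the worked example; the variable names differ from the row's.

def twos_complement(number: int) -> str:
    # Same construction as above: sign bit, zero padding, then the low
    # bits of |number| - 2**bits.
    if number > 0:
        raise ValueError("input must be a negative integer")
    bits = len(bin(number)[3:])
    body = bin(abs(number) - (1 << bits))[3:]
    return "0b" + (("1" + "0" * (bits - len(body)) + body) if number < 0 else "0")

# -5 has 3 magnitude bits; with the sign bit that gives 4-bit 1011 = -5.
assert twos_complement(-5) == "0b1011"
assert twos_complement(-1) == "0b11"
assert twos_complement(0) == "0b0"
print("two's complement checks passed")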
import datasets from .evaluate import evaluate _UpperCAmelCase : Dict = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n" _UpperCAmelCase : Any = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n" _UpperCAmelCase : str = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the CUAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\n 'aupr': Area Under the Precision-Recall curve\n 'prec_at_80_recall': Precision at 80% recall\n 'prec_at_90_recall': Precision at 90% recall\nExamples:\n >>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): def a ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': { 'id': datasets.Value('string' ), 'prediction_text': datasets.features.Sequence(datasets.Value('string' ) ), }, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ) , codebase_urls=['https://www.atticusprojectai.org/cuad'] , reference_urls=['https://www.atticusprojectai.org/cuad'] , ) def a ( self , snake_case , snake_case ): snake_case_ = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} snake_case_ = [ { "paragraphs": [ { "qas": [ 
{ "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] snake_case_ = evaluate(dataset=__lowerCamelCase , predictions=__lowerCamelCase ) return score
285
from ...configuration_utils import PretrainedConfig from ...utils import logging A : str = logging.get_logger(__name__) A : int = { "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json", } class _lowercase ( lowercase__): """simple docstring""" A__ = "mgp-str" def __init__( self : List[str] , __lowerCamelCase : List[Any]=[32, 128] , __lowerCamelCase : Optional[int]=4 , __lowerCamelCase : str=3 , __lowerCamelCase : Optional[Any]=27 , __lowerCamelCase : List[str]=38 , __lowerCamelCase : Dict=50257 , __lowerCamelCase : List[Any]=30522 , __lowerCamelCase : Optional[Any]=768 , __lowerCamelCase : str=12 , __lowerCamelCase : Optional[int]=12 , __lowerCamelCase : List[str]=4.0 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=False , __lowerCamelCase : Dict=1E-5 , __lowerCamelCase : int=0.0 , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Union[str, Any]=False , __lowerCamelCase : str=0.0_2 , **__lowerCamelCase : Dict , ): '''simple docstring''' super().__init__(**__lowerCamelCase ) lowerCamelCase__ : int = image_size lowerCamelCase__ : Union[str, Any] = patch_size lowerCamelCase__ : Dict = num_channels lowerCamelCase__ : Union[str, Any] = max_token_length lowerCamelCase__ : Optional[int] = num_character_labels lowerCamelCase__ : Union[str, Any] = num_bpe_labels lowerCamelCase__ : Optional[int] = num_wordpiece_labels lowerCamelCase__ : List[str] = hidden_size lowerCamelCase__ : Dict = num_hidden_layers lowerCamelCase__ : Any = num_attention_heads lowerCamelCase__ : Any = mlp_ratio lowerCamelCase__ : List[Any] = distilled lowerCamelCase__ : Optional[Any] = layer_norm_eps lowerCamelCase__ : Union[str, Any] = drop_rate lowerCamelCase__ : List[Any] = qkv_bias lowerCamelCase__ : int = attn_drop_rate lowerCamelCase__ : List[Any] = drop_path_rate lowerCamelCase__ : List[str] = output_aa_attentions lowerCamelCase__ : Dict = initializer_range
184
0
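The CUAD metric above reshapes flat reference records into the nested `paragraphs`/`qas` layout that the `evaluate` script expects. The standalone sketch below shows just that reshaping step; the sample id and answer text are placeholders.

references = [
    {"id": "q1", "answers": {"text": ["The seller:"], "answer_start": [143]}},
]
dataset = [
    {
        "paragraphs": [
            {
                "qas": [
                    {
                        "answers": [{"text": t} for t in ref["answers"]["text"]],
                        "id": ref["id"],
                    }
                    for ref in references
                ]
            }
        ]
    }
]
print(dataset[0]["paragraphs"][0]["qas"][0]["id"])  # q1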
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __snake_case = data_utils.TransfoXLTokenizer __snake_case = data_utils.TransfoXLCorpus __snake_case = data_utils __snake_case = data_utils def lowerCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> List[str]: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__lowerCAmelCase , '''rb''' ) as fp: UpperCAmelCase : Dict =pickle.load(__lowerCAmelCase , encoding='''latin1''' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) UpperCAmelCase : Dict =pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(f'''Save vocabulary to {pytorch_vocab_dump_path}''' ) UpperCAmelCase : str =corpus.vocab.__dict__ torch.save(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : List[Any] =corpus.__dict__ corpus_dict_no_vocab.pop('''vocab''' , __lowerCAmelCase ) UpperCAmelCase : int =pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(f'''Save dataset to {pytorch_dataset_dump_path}''' ) torch.save(__lowerCAmelCase , __lowerCAmelCase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model UpperCAmelCase : Any =os.path.abspath(__lowerCAmelCase ) UpperCAmelCase : List[Any] =os.path.abspath(__lowerCAmelCase ) print(f'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' ) # Initialise PyTorch model if transfo_xl_config_file == "": UpperCAmelCase : Union[str, Any] =TransfoXLConfig() else: UpperCAmelCase : Dict =TransfoXLConfig.from_json_file(__lowerCAmelCase ) print(f'''Building PyTorch model from configuration: {config}''' ) UpperCAmelCase : str =TransfoXLLMHeadModel(__lowerCAmelCase ) UpperCAmelCase : List[Any] =load_tf_weights_in_transfo_xl(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # Save pytorch-model UpperCAmelCase : Tuple =os.path.join(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Tuple =os.path.join(__lowerCAmelCase , __lowerCAmelCase ) print(f'''Save PyTorch model to {os.path.abspath(__lowerCAmelCase )}''' ) torch.save(model.state_dict() , __lowerCAmelCase ) print(f'''Save configuration file to {os.path.abspath(__lowerCAmelCase )}''' ) with open(__lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) __snake_case = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
78
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge __snake_case = [ '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the''' ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe''' ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''', '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal''' ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s''' ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the''' ''' body.''', '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of''' ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the''' ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital''' ''' punishment.''', ] __snake_case = [ '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''' ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz''' ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''', '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .''' ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against''' ''' Israelis .''', '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to''' ''' death . Organization claims that governments around the world are using the threat of terrorism to advance''' ''' executions . 
The number of executions worldwide has gone down by almost 22% compared with 2013, but death''' ''' sentences up by 28% .''', ] def lowerCAmelCase_ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase : Optional[int] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , bootstrap_aggregation=__lowerCAmelCase , rouge_keys=['''rouge2''', '''rougeL'''] ) assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : List[Any] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , bootstrap_aggregation=__lowerCAmelCase , rouge_keys=['''rouge2'''] ) assert ( pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean() ) def lowerCAmelCase_ ( )-> Dict: '''simple docstring''' UpperCAmelCase : Any ='''rougeLsum''' UpperCAmelCase : Optional[Any] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=[k] )[k] UpperCAmelCase : List[Any] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=[k] )[k] assert score > score_no_sep def lowerCAmelCase_ ( )-> Any: '''simple docstring''' UpperCAmelCase : str =['''rouge1''', '''rouge2''', '''rougeL'''] UpperCAmelCase : int =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=__lowerCAmelCase ) UpperCAmelCase : Tuple =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase , rouge_keys=__lowerCAmelCase ) assert score_sep == score_no_sep def lowerCAmelCase_ ( )-> Dict: '''simple docstring''' UpperCAmelCase : int =[ '''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''', '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''', ] UpperCAmelCase : Any =[ '''Margot Frank, died in 1945, a month earlier than previously thought.''', '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of''' ''' the final seconds on board Flight 9525.''', ] assert calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase ) == calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , newline_sep=__lowerCAmelCase ) def lowerCAmelCase_ ( )-> List[str]: '''simple docstring''' UpperCAmelCase : Union[str, Any] =[ '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" ''' ] UpperCAmelCase : Optional[Any] =[ ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . 
Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .''' ] UpperCAmelCase : Optional[int] =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , rouge_keys=['''rougeLsum'''] , newline_sep=__lowerCAmelCase )['''rougeLsum'''] UpperCAmelCase : int =calculate_rouge(__lowerCAmelCase , __lowerCAmelCase , rouge_keys=['''rougeLsum'''] )['''rougeLsum'''] assert new_score > prev_score def lowerCAmelCase_ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase : List[Any] =Path('''examples/seq2seq/test_data/wmt_en_ro''' ) UpperCAmelCase : Tuple =calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) ) assert isinstance(__lowerCAmelCase , __lowerCAmelCase ) UpperCAmelCase : Dict =calculate_rouge_path( data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=__lowerCAmelCase ) assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
78
1
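The `rougeLsum` assertions above hinge on sentence separation, because the underlying `rouge-score` package treats newlines as sentence boundaries for that key. A minimal demonstration, assuming the third-party `rouge-score` package is installed:

from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(["rougeLsum"], use_stemmer=True)
pred = "the cat sat down.\nthe dog ran away."
ref = "the cat sat down.\nthe dog ran off."
with_newlines = scorer.score(ref, pred)["rougeLsum"].fmeasure
flat = scorer.score(ref.replace("\n", " "), pred.replace("\n", " "))["rougeLsum"].fmeasure
print(with_newlines, flat)  # sentence-split scoring generally differs from flat scoring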
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase ) ->bool: """simple docstring""" a_ = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
243
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def UpperCamelCase ( UpperCAmelCase ) ->List[Any]: """simple docstring""" def is_in_circle(UpperCAmelCase , UpperCAmelCase ) -> bool: a_ = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle a_ = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(UpperCAmelCase ) ) # The ratio of the area for circle to square is pi/4. a_ = proportion * 4 print(F'''The estimated value of pi is {pi_estimate}''' ) print(F'''The numpy value of pi is {pi}''' ) print(F'''The total error is {abs(pi - pi_estimate )}''' ) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = 1.0 , ) ->float: """simple docstring""" return mean( function_to_integrate(uniform(UpperCAmelCase , UpperCAmelCase ) ) for _ in range(UpperCAmelCase ) ) * (max_value - min_value) def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase = 0.0 , UpperCAmelCase = 1.0 ) ->None: """simple docstring""" def identity_function(UpperCAmelCase ) -> float: return x a_ = area_under_curve_estimator( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) a_ = (max_value * max_value - min_value * min_value) / 2 print("******************" ) print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {expected_value}''' ) print(F'''Total error is {abs(estimated_value - expected_value )}''' ) print("******************" ) def UpperCamelCase ( UpperCAmelCase ) ->None: """simple docstring""" def function_to_integrate(UpperCAmelCase ) -> float: return sqrt(4.0 - x * x ) a_ = area_under_curve_estimator( UpperCAmelCase , UpperCAmelCase , 0.0 , 2.0 ) print("******************" ) print("Estimating pi using area_under_curve_estimator" ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {pi}''' ) print(F'''Total error is {abs(estimated_value - pi )}''' ) print("******************" ) if __name__ == "__main__": import doctest doctest.testmod()
243
1
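The area-based pi estimate in the row above boils down to a single ratio. Here is a compact, self-contained restatement (the `estimate_pi` name is illustrative):

from random import uniform

def estimate_pi(samples: int) -> float:
    # The fraction of uniform points inside the unit circle approximates pi / 4.
    inside = sum(
        1
        for _ in range(samples)
        if uniform(-1.0, 1.0) ** 2 + uniform(-1.0, 1.0) ** 2 <= 1.0
    )
    return 4.0 * inside / samples

print(estimate_pi(100_000))  # close to 3.1415..., exact value varies per run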
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModel4XPU from .scripts import test_script, test_sync, test_ops # isort: skip
360
import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): '''simple docstring''' _lowerCAmelCase : Dict = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") _lowerCAmelCase : Tuple = ( ("layer.", "layer_"), ("word_embeddings.weight", "word_embeddings"), ("position_embeddings.weight", "position_embeddings"), ("token_type_embeddings.weight", "token_type_embeddings"), (".", "/"), ("LayerNorm/weight", "LayerNorm/gamma"), ("LayerNorm/bias", "LayerNorm/beta"), ("weight", "kernel"), ) if not os.path.isdir(_lowerCamelCase ): os.makedirs(_lowerCamelCase ) _lowerCAmelCase : Any = model.state_dict() def to_tf_var_name(_lowerCamelCase ): for patt, repl in iter(_lowerCamelCase ): _lowerCAmelCase : str = name.replace(_lowerCamelCase , _lowerCamelCase ) return F"bert/{name}" def create_tf_var(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ): _lowerCAmelCase : Optional[Any] = tf.dtypes.as_dtype(tensor.dtype ) _lowerCAmelCase : Optional[int] = tf.get_variable(dtype=_lowerCamelCase , shape=tensor.shape , name=_lowerCamelCase , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(_lowerCamelCase ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: _lowerCAmelCase : Optional[Any] = to_tf_var_name(_lowerCamelCase ) _lowerCAmelCase : Any = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): _lowerCAmelCase : Tuple = torch_tensor.T _lowerCAmelCase : str = create_tf_var(tensor=_lowerCamelCase , name=_lowerCamelCase , session=_lowerCamelCase ) tf.keras.backend.set_value(_lowerCamelCase , _lowerCamelCase ) _lowerCAmelCase : Optional[int] = session.run(_lowerCamelCase ) print(F"Successfully created {tf_name}: {np.allclose(_lowerCamelCase , _lowerCamelCase )}" ) _lowerCAmelCase : List[Any] = tf.train.Saver(tf.trainable_variables() ) saver.save(_lowerCamelCase , os.path.join(_lowerCamelCase , model_name.replace("-" , "_" ) + ".ckpt" ) ) def A ( _lowerCamelCase=None ): '''simple docstring''' _lowerCAmelCase : int = argparse.ArgumentParser() parser.add_argument("--model_name" , type=_lowerCamelCase , required=_lowerCamelCase , help="model name e.g. bert-base-uncased" ) parser.add_argument( "--cache_dir" , type=_lowerCamelCase , default=_lowerCamelCase , required=_lowerCamelCase , help="Directory containing pytorch model" ) parser.add_argument("--pytorch_model_path" , type=_lowerCamelCase , required=_lowerCamelCase , help="/path/to/<pytorch-model-name>.bin" ) parser.add_argument("--tf_cache_dir" , type=_lowerCamelCase , required=_lowerCamelCase , help="Directory in which to save tensorflow model" ) _lowerCAmelCase : Optional[Any] = parser.parse_args(_lowerCamelCase ) _lowerCAmelCase : List[Any] = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=_lowerCamelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
300
0
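Most of the BERT-to-TensorFlow conversion above is mechanical key rewriting. The snippet below extracts that translation step on its own, using a subset of the row's pattern table and a made-up state-dict key:

var_map = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("weight", "kernel"),
)

def to_tf_var_name(name: str) -> str:
    # Apply the (pattern, replacement) pairs in order; ordering matters,
    # e.g. LayerNorm/weight must be rewritten before the bare "weight" rule.
    for patt, repl in var_map:
        name = name.replace(patt, repl)
    return f"bert/{name}"

print(to_tf_var_name("encoder.layer.0.attention.self.query.weight"))
# -> bert/encoder/layer_0/attention/self/query/kernel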
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: int = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_: Dict = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] SCREAMING_SNAKE_CASE_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) SCREAMING_SNAKE_CASE_: Any = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073], "image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711], } SCREAMING_SNAKE_CASE_: Optional[int] = os.path.join(self.tmpdirname , lowerCAmelCase__) with open(self.image_processor_file , "w" , encoding="utf-8") as fp: json.dump(lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **lowerCAmelCase__ : Optional[int]): return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , **lowerCAmelCase__ : str): return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[str] , **lowerCAmelCase__ : Tuple): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[str]): shutil.rmtree(self.tmpdirname) def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] SCREAMING_SNAKE_CASE_: Optional[int] = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1)) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : str): SCREAMING_SNAKE_CASE_: Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE_: List[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE_: List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_: Optional[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__) processor_slow.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_: Optional[int] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__) processor_fast.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_: Tuple = AlignProcessor.from_pretrained(self.tmpdirname) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab()) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab()) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab()) self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__) self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string()) 
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string()) self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__) self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: List[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) SCREAMING_SNAKE_CASE_: Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)") SCREAMING_SNAKE_CASE_: List[str] = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0) SCREAMING_SNAKE_CASE_: Dict = AlignProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCAmelCase__ , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , lowerCAmelCase__) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: List[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_: Any = self.get_tokenizer() SCREAMING_SNAKE_CASE_: List[str] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_: List[Any] = image_processor(lowerCAmelCase__ , return_tensors="np") SCREAMING_SNAKE_CASE_: int = processor(images=lowerCAmelCase__ , return_tensors="np") for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_: List[str] = self.get_tokenizer() SCREAMING_SNAKE_CASE_: Any = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = "lower newer" SCREAMING_SNAKE_CASE_: Tuple = processor(text=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = tokenizer(lowerCAmelCase__ , padding="max_length" , max_length=64) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key]) def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: List[str] = self.get_image_processor() SCREAMING_SNAKE_CASE_: str = self.get_tokenizer() SCREAMING_SNAKE_CASE_: Dict = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = "lower newer" SCREAMING_SNAKE_CASE_: Tuple = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_: Optional[int] = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__) self.assertListEqual(list(inputs.keys()) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"]) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase__): processor() def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: Union[str, Any] = self.get_image_processor() SCREAMING_SNAKE_CASE_: int = self.get_tokenizer() SCREAMING_SNAKE_CASE_: Optional[Any] = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] SCREAMING_SNAKE_CASE_: Optional[Any] = processor.batch_decode(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = tokenizer.batch_decode(lowerCAmelCase__) 
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__) def _SCREAMING_SNAKE_CASE ( self : List[Any]): SCREAMING_SNAKE_CASE_: Dict = self.get_image_processor() SCREAMING_SNAKE_CASE_: Tuple = self.get_tokenizer() SCREAMING_SNAKE_CASE_: Any = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = "lower newer" SCREAMING_SNAKE_CASE_: str = self.prepare_image_inputs() SCREAMING_SNAKE_CASE_: int = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__) self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
13
def matching_min_vertex_cover(graph: dict) -> set: '''simple docstring''' chosen_vertices = set() # edges = list of graph's edges edges = get_edges(graph ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: from_node, to_node = edges.pop() chosen_vertices.add(from_node ) chosen_vertices.add(to_node ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(edge ) return chosen_vertices def get_edges(graph: dict) -> set: '''simple docstring''' edges = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
333
0
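The vertex-cover routine in this row is a greedy matching heuristic. Here is a compact restatement using the graph from the module's trailing comment (the `min_vertex_cover` name is illustrative):

def min_vertex_cover(graph: dict) -> set:
    # Greedy matching: take any remaining edge, keep both endpoints,
    # drop every edge touching them (a 2-approximation of the minimum cover).
    edges = {(u, v) for u, vs in graph.items() for v in vs}
    cover = set()
    while edges:
        u, v = edges.pop()
        cover.update((u, v))
        edges = {e for e in edges if u not in e and v not in e}
    return cover

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
cover = min_vertex_cover(graph)
assert all(u in cover or v in cover for u, vs in graph.items() for v in vs)
print(cover)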
'''simple docstring''' from typing import Any class Node: def __init__( self ,data: Any ): '''simple docstring''' self.data = data self.next = None class LinkedList: def __init__( self ): '''simple docstring''' self.head = None def print_list( self ): '''simple docstring''' temp = self.head while temp is not None: print(temp.data ,end=' ' ) temp = temp.next print() def push( self ,new_data: Any ): '''simple docstring''' new_node = Node(new_data ) new_node.next = self.head self.head = new_node def swap_nodes( self ,node_data_1: Any ,node_data_2: Any ): '''simple docstring''' if node_data_1 == node_data_2: return node_1 = self.head while node_1 is not None and node_1.data != node_data_1: node_1 = node_1.next node_2 = self.head while node_2 is not None and node_2.data != node_data_2: node_2 = node_2.next if node_1 is None or node_2 is None: return node_1.data, node_2.data = node_2.data, node_1.data if __name__ == "__main__": ll = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print('After swapping') ll.print_list()
236
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ : Optional[int] = logging.get_logger(__name__) snake_case_ : List[Any] = { 'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json', 'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json', # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class lowercase__ ( lowercase ): lowercase__ = """xlm-roberta-xl""" def __init__( self : Optional[int] ,lowerCamelCase__ : Optional[Any]=250880 ,lowerCamelCase__ : Tuple=2560 ,lowerCamelCase__ : Union[str, Any]=36 ,lowerCamelCase__ : List[str]=32 ,lowerCamelCase__ : Optional[Any]=10240 ,lowerCamelCase__ : Tuple="gelu" ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : int=0.1 ,lowerCamelCase__ : Optional[int]=514 ,lowerCamelCase__ : List[str]=1 ,lowerCamelCase__ : Dict=0.0_2 ,lowerCamelCase__ : Any=1E-05 ,lowerCamelCase__ : Union[str, Any]=1 ,lowerCamelCase__ : str=0 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : Union[str, Any]="absolute" ,lowerCamelCase__ : Optional[Any]=True ,lowerCamelCase__ : List[str]=None ,**lowerCamelCase__ : Dict ,): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ) _UpperCamelCase : Optional[int] = vocab_size _UpperCamelCase : Optional[Any] = hidden_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : str = num_attention_heads _UpperCamelCase : Any = hidden_act _UpperCamelCase : Dict = intermediate_size _UpperCamelCase : Optional[int] = hidden_dropout_prob _UpperCamelCase : Any = attention_probs_dropout_prob _UpperCamelCase : List[Any] = max_position_embeddings _UpperCamelCase : str = type_vocab_size _UpperCamelCase : Optional[Any] = initializer_range _UpperCamelCase : Optional[int] = layer_norm_eps _UpperCamelCase : Optional[int] = position_embedding_type _UpperCamelCase : Optional[Any] = use_cache _UpperCamelCase : Optional[Any] = classifier_dropout class lowercase__ ( lowercase ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": _UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
236
1
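The linked-list swap in the row above trades relinking for a payload exchange. A self-contained sketch of the same idea (names are illustrative):

class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

def swap_nodes(head, value_a, value_b):
    # Find both nodes by value, then exchange their payloads; no pointers change.
    node_a = node_b = None
    current = head
    while current is not None:
        if current.data == value_a:
            node_a = current
        if current.data == value_b:
            node_b = current
        current = current.next
    if node_a is not None and node_b is not None:
        node_a.data, node_b.data = node_b.data, node_a.data

head = Node(1)
head.next = Node(2)
head.next.next = Node(3)
swap_nodes(head, 1, 3)
current = head
while current is not None:
    print(current.data, end=" ")  # prints: 3 2 1
    current = current.next
print()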
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __magic_name__ ( _UpperCAmelCase, unittest.TestCase): UpperCamelCase__ = LEDTokenizer UpperCamelCase__ = LEDTokenizerFast UpperCamelCase__ = True def SCREAMING_SNAKE_CASE_ ( self : Dict ): super().setUp() lowercase_ : str = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] lowercase_ : Tuple = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) lowercase_ : Dict = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] lowercase_ : List[str] = {"""unk_token""": """<unk>"""} lowercase_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowercase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowercase_ ) ) def SCREAMING_SNAKE_CASE_ ( self : Dict , **lowercase_ : str ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **lowercase_ : Tuple ): kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[int] ): return "lower newer", "lower newer" @cached_property def SCREAMING_SNAKE_CASE_ ( self : Any ): return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] lowercase_ : Any = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase_ : str = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowercase_ : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ): lowercase_ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase_ : Union[str, Any] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="""pt""" ) self.assertIn("""input_ids""" , lowercase_ ) self.assertIn("""attention_mask""" , lowercase_ ) self.assertNotIn("""labels""" , lowercase_ ) self.assertNotIn("""decoder_attention_mask""" , lowercase_ ) @require_torch def 
SCREAMING_SNAKE_CASE_ ( self : List[Any] ): lowercase_ : int = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase_ : Optional[Any] = tokenizer(text_target=lowercase_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase_ : Dict = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=lowercase_ , truncation=lowercase_ , return_tensors="""pt""" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual(batch.input_ids.shape , (2, 5122) ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Dict ): lowercase_ : List[str] = ["""A long paragraph for summarization."""] lowercase_ : Optional[Any] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase_ : str = tokenizer(lowercase_ , return_tensors="""pt""" ) lowercase_ : Union[str, Any] = tokenizer(text_target=lowercase_ , return_tensors="""pt""" ) lowercase_ : Tuple = inputs["""input_ids"""] lowercase_ : str = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase_ : Optional[Any] = ["""Summary of the text.""", """Another summary."""] lowercase_ : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowercase_ : List[str] = tokenizer(lowercase_ , padding=lowercase_ ) lowercase_ : Tuple = [[0] * len(lowercase_ ) for x in encoded_output["""input_ids"""]] lowercase_ : Tuple = tokenizer.pad(lowercase_ ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , lowercase_ ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE_ ( self : List[str] ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : str = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Optional[Any] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase_ : Union[str, Any] = """A, <mask> AllenNLP sentence.""" lowercase_ : Dict = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) lowercase_ : List[Any] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) lowercase_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) lowercase_ : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 
3645, 4, 2] ) self.assertSequenceEqual( lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
239
'''simple docstring''' import operator as op def solve ( post_fix: list ) -> int: stack = [] div = lambda x , y : int(x / y ) # noqa: E731 integer division operation opr = { """^""": op.pow, """*""": op.mul, """/""": div, """+""": op.add, """-""": op.sub, } # operators & their respective operation # print table header print("""Symbol""".center(8 ) , """Action""".center(12 ) , """Stack""" , sep=""" | """ ) print("""-""" * (30 + len(post_fix )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(x ) # append x to stack # output in tabular format print(x.rjust(8 ) , ("""push(""" + x + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ ) else: b = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ ) a = stack.pop() # pop stack # output in tabular format print("""""".rjust(8 ) , ("""pop(""" + a + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ ) stack.append( str(opr[x](int(a ) , int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ("""push(""" + a + x + b + """)""").ljust(12 ) , """,""".join(stack ) , sep=""" | """ , ) return int(stack[0] ) if __name__ == "__main__": Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
239
1
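The postfix evaluator in this row is easier to follow without the tabular trace. A minimal restatement of the same stack discipline:

import operator as op

OPS = {"^": op.pow, "*": op.mul, "/": lambda x, y: int(x / y), "+": op.add, "-": op.sub}

def eval_postfix(tokens):
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b, a = stack.pop(), stack.pop()  # right operand comes off first
            stack.append(OPS[tok](a, b))
    return stack[0]

print(eval_postfix("5 6 9 * +".split()))  # 5 + 6 * 9 = 59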
"""simple docstring""" import itertools import math def a__ ( snake_case__ ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a__ ( ) -> Tuple: lowerCamelCase = 2 while True: if is_prime(snake_case__ ): yield num num += 1 def a__ ( snake_case__ = 1_00_01 ) -> int: return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
356
"""simple docstring""" import math import random def a__ ( snake_case__ , snake_case__ = False ) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value lowerCAmelCase : Dict = 0.0_2 def a__ ( snake_case__ , snake_case__ ) -> float: lowerCamelCase = float(2 * (random.randint(1 , 1_00 )) - 1 ) for _ in range(snake_case__ ): # Forward propagation lowerCamelCase = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? lowerCamelCase = (expected / 1_00) - layer_a # Error delta lowerCamelCase = layer_1_error * sigmoid_function(snake_case__ , snake_case__ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : Any = int(input("""Expected value: """)) lowerCAmelCase : List[Any] = int(input("""Number of propagations: """)) print(forward_propagation(expected, number_propagations))
168
0
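One update step of the single-weight sigmoid neuron above, with a fixed starting weight instead of a random one so the numbers are reproducible (a sketch, not the row's exact function):

import math

INITIAL_VALUE = 0.02

def sigmoid(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)  # derivative form, given the *activated* value
    return 1 / (1 + math.exp(-value))

weight, expected = 1.0, 50  # target activation is expected / 100 = 0.5
layer_1 = sigmoid(INITIAL_VALUE * weight)
layer_1_error = expected / 100 - layer_1
layer_1_delta = layer_1_error * sigmoid(layer_1, deriv=True)
weight += INITIAL_VALUE * layer_1_delta
print(round(layer_1, 4), round(weight, 6))  # activation near 0.505, weight nudged down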
import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class A( unittest.TestCase ): '''simple docstring''' @property def a__ ( self : str ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) return model def a__ ( self : List[str] ) -> int: """simple docstring""" lowerCamelCase_ = self.dummy_uncond_unet lowerCamelCase_ = PNDMScheduler() lowerCamelCase_ = PNDMPipeline(unet=A_ , scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pndm(generator=A_ , num_inference_steps=20 , output_type='numpy' ).images lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pndm(generator=A_ , num_inference_steps=20 , output_type='numpy' , return_dict=A_ )[0] lowerCamelCase_ = image[0, -3:, -3:, -1] lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = 'google/ddpm-cifar10-32' lowerCamelCase_ = UNetaDModel.from_pretrained(A_ ) lowerCamelCase_ = PNDMScheduler() lowerCamelCase_ = PNDMPipeline(unet=A_ , scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) lowerCamelCase_ = torch.manual_seed(0 ) lowerCamelCase_ = pndm(generator=A_ , output_type='numpy' ).images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
204
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_fnet import FNetTokenizer else: lowerCamelCase : List[Any] = None lowerCamelCase : int = logging.get_logger(__name__) lowerCamelCase : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} lowerCamelCase : Optional[int] = { "vocab_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model", }, "tokenizer_file": { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json", }, } lowerCamelCase : List[Any] = { "google/fnet-base": 512, "google/fnet-large": 512, } lowerCamelCase : Any = "▁" class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = VOCAB_FILES_NAMES UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase = ['''input_ids''', '''token_type_ids'''] UpperCamelCase = FNetTokenizer def __init__( self : Optional[int] , A_ : Any=None , A_ : int=None , A_ : int=False , A_ : Optional[int]=True , A_ : List[Any]=True , A_ : Tuple="<unk>" , A_ : Optional[int]="[SEP]" , A_ : List[Any]="<pad>" , A_ : Optional[int]="[CLS]" , A_ : Optional[Any]="[MASK]" , **A_ : Dict , ) -> List[str]: """simple docstring""" lowerCamelCase_ = ( AddedToken(A_ , lstrip=A_ , rstrip=A_ , normalized=A_ ) if isinstance(A_ , A_ ) else mask_token ) super().__init__( A_ , tokenizer_file=A_ , do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , **A_ , ) lowerCamelCase_ = do_lower_case lowerCamelCase_ = remove_space lowerCamelCase_ = keep_accents lowerCamelCase_ = vocab_file lowerCamelCase_ = False if not self.vocab_file else True def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def a__ ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCamelCase_ = [self.sep_token_id] lowerCamelCase_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def a__ ( self : Dict , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(A_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCamelCase_ = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
204
1
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __A = logging.get_logger(__name__) __A = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart __A = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } __A = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] snake_case_ = BartTokenizer def __init__( self , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , lowerCamelCase__=True , **lowerCamelCase__ , ) -> Any: '''simple docstring''' super().__init__( lowerCamelCase__ , lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , 
add_prefix_space=lowerCamelCase__ , trim_offsets=lowerCamelCase__ , **lowerCamelCase__ , ) __lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space: __lowerCamelCase = getattr(lowerCamelCase__ , pre_tok_state.pop('type' ) ) __lowerCamelCase = add_prefix_space __lowerCamelCase = pre_tok_class(**lowerCamelCase__ ) __lowerCamelCase = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` __lowerCamelCase = 'post_processor' __lowerCamelCase = getattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) if tokenizer_component_instance: __lowerCamelCase = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __lowerCamelCase = tuple(state['sep'] ) if "cls" in state: __lowerCamelCase = tuple(state['cls'] ) __lowerCamelCase = False if state.get('add_prefix_space' , lowerCamelCase__ ) != add_prefix_space: __lowerCamelCase = add_prefix_space __lowerCamelCase = True if state.get('trim_offsets' , lowerCamelCase__ ) != trim_offsets: __lowerCamelCase = trim_offsets __lowerCamelCase = True if changes_to_apply: __lowerCamelCase = getattr(lowerCamelCase__ , state.pop('type' ) ) __lowerCamelCase = component_class(**lowerCamelCase__ ) setattr(self.backend_tokenizer , lowerCamelCase__ , lowerCamelCase__ ) @property def lowercase_ ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def lowercase_ ( self , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else value __lowerCamelCase = value def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding: '''simple docstring''' __lowerCamelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , *lowerCamelCase__ , **lowerCamelCase__ ) -> BatchEncoding: '''simple docstring''' __lowerCamelCase = kwargs.get('is_split_into_words' , lowerCamelCase__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' 
) return super()._encode_plus(*lowerCamelCase__ , **lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' __lowerCamelCase = self._tokenizer.model.save(lowerCamelCase__ , name=lowerCamelCase__ ) return tuple(lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> Tuple: '''simple docstring''' __lowerCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[int]: '''simple docstring''' __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
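# Sketch of why add_prefix_space (wired up in __init__ above) matters: the fast
# tokenizer raises on pre-tokenized input unless it was instantiated with it.
# Assumes network access to the Hub checkpoint.
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)
print(tok.convert_ids_to_tokens(enc["input_ids"]))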
351
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __A = logging.get_logger(__name__) __A = { "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json", } # fmt: off __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_57, 3_66, 4_38, 5_32, 6_85, 7_05, 7_96, 9_30, 10_58, 12_20, 12_67, 12_79, 13_03, 13_43, 13_77, 13_91, 16_35, 17_82, 18_75, 21_62, 23_61, 24_88, 34_67, 40_08, 42_11, 46_00, 48_08, 52_99, 58_55, 63_29, 72_03, 96_09, 99_59, 1_05_63, 1_07_86, 1_14_20, 1_17_09, 1_19_07, 1_31_63, 1_36_97, 1_37_00, 1_48_08, 1_53_06, 1_64_10, 1_67_91, 1_79_92, 1_92_03, 1_95_10, 2_07_24, 2_23_05, 2_29_35, 2_70_07, 3_01_09, 3_04_20, 3_34_09, 3_49_49, 4_02_83, 4_04_93, 4_05_49, 4_72_82, 4_91_46, 5_02_57, 5_03_59, 5_03_60, 5_03_61 ] __A = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 3_59, 5_03, 5_22, 5_42, 8_73, 8_93, 9_02, 9_18, 9_22, 9_31, 13_50, 18_53, 19_82, 24_60, 26_27, 32_46, 32_53, 32_68, 35_36, 38_46, 39_61, 41_83, 46_67, 65_85, 66_47, 72_73, 90_61, 93_83, 1_04_28, 1_09_29, 1_19_38, 1_20_33, 1_23_31, 1_25_62, 1_37_93, 1_41_57, 1_46_35, 1_52_65, 1_56_18, 1_65_53, 1_66_04, 1_83_62, 1_89_56, 2_00_75, 2_16_75, 2_25_20, 2_61_30, 2_61_61, 2_64_35, 2_82_79, 2_94_64, 3_16_50, 3_23_02, 3_24_70, 3_68_65, 4_28_63, 4_74_25, 4_98_70, 5_02_54, 5_02_58, 5_03_60, 5_03_61, 5_03_62 ] class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''whisper''' snake_case_ = ['''past_key_values'''] snake_case_ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self , lowerCamelCase__=51_865 , lowerCamelCase__=80 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=4 , lowerCamelCase__=1_536 , lowerCamelCase__=1_536 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=50_257 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="gelu" , lowerCamelCase__=256 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.02 , lowerCamelCase__=False , lowerCamelCase__=1_500 , lowerCamelCase__=448 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=50_256 , lowerCamelCase__=None , lowerCamelCase__=[220, 50_256] , lowerCamelCase__=False , lowerCamelCase__=256 , lowerCamelCase__=False , lowerCamelCase__=0.05 , lowerCamelCase__=10 , lowerCamelCase__=2 , lowerCamelCase__=0.0 , lowerCamelCase__=10 , lowerCamelCase__=0 , lowerCamelCase__=7 , **lowerCamelCase__ , ) -> str: '''simple docstring''' __lowerCamelCase = vocab_size __lowerCamelCase = num_mel_bins __lowerCamelCase = d_model __lowerCamelCase = encoder_layers __lowerCamelCase = encoder_attention_heads __lowerCamelCase = decoder_layers __lowerCamelCase = decoder_attention_heads __lowerCamelCase = decoder_ffn_dim __lowerCamelCase = encoder_ffn_dim __lowerCamelCase = dropout __lowerCamelCase = attention_dropout __lowerCamelCase = activation_dropout __lowerCamelCase = activation_function __lowerCamelCase = init_std __lowerCamelCase = encoder_layerdrop __lowerCamelCase = decoder_layerdrop __lowerCamelCase = use_cache 
__lowerCamelCase = encoder_layers __lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True __lowerCamelCase = max_source_positions __lowerCamelCase = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. __lowerCamelCase = classifier_proj_size __lowerCamelCase = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowerCamelCase = apply_spec_augment __lowerCamelCase = mask_time_prob __lowerCamelCase = mask_time_length __lowerCamelCase = mask_time_min_masks __lowerCamelCase = mask_feature_prob __lowerCamelCase = mask_feature_length __lowerCamelCase = mask_feature_min_masks __lowerCamelCase = median_filter_width super().__init__( pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , suppress_tokens=lowerCamelCase__ , begin_suppress_tokens=lowerCamelCase__ , **lowerCamelCase__ , ) class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __lowerCamelCase = OrderedDict( [ ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}), ] ) if self.use_past: __lowerCamelCase = {0: 'batch'} else: __lowerCamelCase = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCamelCase__ , direction='inputs' ) return common_inputs def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = -1 , lowerCamelCase__ = -1 , lowerCamelCase__ = False , lowerCamelCase__ = None , lowerCamelCase__ = 22_050 , lowerCamelCase__ = 5.0 , lowerCamelCase__ = 220 , ) -> Mapping[str, Any]: '''simple docstring''' __lowerCamelCase = OrderedDict() __lowerCamelCase = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=lowerCamelCase__ , framework=lowerCamelCase__ , sampling_rate=lowerCamelCase__ , time_duration=lowerCamelCase__ , frequency=lowerCamelCase__ , ) __lowerCamelCase = encoder_inputs['input_features'].shape[2] __lowerCamelCase = encoder_sequence_length // 2 if self.use_past else seq_length __lowerCamelCase = super().generate_dummy_inputs( preprocessor.tokenizer , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) __lowerCamelCase = encoder_inputs.pop('input_features' ) __lowerCamelCase = decoder_inputs.pop('decoder_input_ids' ) if "past_key_values" in decoder_inputs: __lowerCamelCase = decoder_inputs.pop('past_key_values' ) return dummy_inputs @property def lowercase_ ( self ) -> float: '''simple docstring''' return 1e-3
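# Minimal sketch: the defaults in the __init__ above describe a whisper-base
# style layout, and a randomly initialized model can be built from the config
# with no download.
from transformers import WhisperConfig, WhisperModel

config = WhisperConfig()
model = WhisperModel(config)
print(config.vocab_size, config.num_mel_bins, config.max_source_positions)  # 51865 80 1500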
348
0
'''simple docstring''' from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : torch.FloatTensor class lowercase_ (nn.Module ): """simple docstring""" def __init__( self : str ,lowercase__ : Tuple=3 ,lowercase__ : List[str]=3 ,lowercase__ : str=("DownEncoderBlock2D",) ,lowercase__ : List[str]=(6_4,) ,lowercase__ : List[str]=2 ,lowercase__ : Optional[Any]=3_2 ,lowercase__ : List[str]="silu" ,lowercase__ : List[str]=True ,): super().__init__() __lowercase = layers_per_block __lowercase = torch.nn.Convad( lowercase__ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,) __lowercase = None __lowercase = nn.ModuleList([] ) # down __lowercase = block_out_channels[0] for i, down_block_type in enumerate(lowercase__ ): __lowercase = output_channel __lowercase = block_out_channels[i] __lowercase = i == len(lowercase__ ) - 1 __lowercase = get_down_block( lowercase__ ,num_layers=self.layers_per_block ,in_channels=lowercase__ ,out_channels=lowercase__ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=lowercase__ ,resnet_groups=lowercase__ ,attention_head_dim=lowercase__ ,temb_channels=lowercase__ ,) self.down_blocks.append(lowercase__ ) # mid __lowercase = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=lowercase__ ,output_scale_factor=1 ,resnet_time_scale_shift='''default''' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=lowercase__ ,temb_channels=lowercase__ ,) # out __lowercase = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=lowercase__ ,eps=1e-6 ) __lowercase = nn.SiLU() __lowercase = 2 * out_channels if double_z else out_channels __lowercase = nn.Convad(block_out_channels[-1] ,lowercase__ ,3 ,padding=1 ) __lowercase = False def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Any ): __lowercase = x __lowercase = self.conv_in(lowercase__ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowercase__ : List[Any] ): def custom_forward(*lowercase__ : Dict ): return module(*lowercase__ ) return custom_forward # down if is_torch_version('''>=''' ,'''1.11.0''' ): for down_block in self.down_blocks: __lowercase = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase__ ) ,lowercase__ ,use_reentrant=lowercase__ ) # middle __lowercase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,lowercase__ ,use_reentrant=lowercase__ ) else: for down_block in self.down_blocks: __lowercase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) ,lowercase__ ) # middle __lowercase = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,lowercase__ ) else: # down for down_block in self.down_blocks: __lowercase = down_block(lowercase__ ) # middle __lowercase = self.mid_block(lowercase__ ) # post-process __lowercase = self.conv_norm_out(lowercase__ ) __lowercase = self.conv_act(lowercase__ ) __lowercase = self.conv_out(lowercase__ ) return sample class lowercase_ (nn.Module ): """simple docstring""" def __init__( self : str ,lowercase__ : Optional[Any]=3 ,lowercase__ : Any=3 ,lowercase__ : Union[str, Any]=("UpDecoderBlock2D",) ,lowercase__ : Any=(6_4,) ,lowercase__ : str=2 ,lowercase__ : 
Optional[int]=3_2 ,lowercase__ : Tuple="silu" ,lowercase__ : Union[str, Any]="group" ,): super().__init__() __lowercase = layers_per_block __lowercase = nn.Convad( lowercase__ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,) __lowercase = None __lowercase = nn.ModuleList([] ) __lowercase = in_channels if norm_type == '''spatial''' else None # mid __lowercase = UNetMidBlockaD( in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=lowercase__ ,output_scale_factor=1 ,resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=lowercase__ ,temb_channels=lowercase__ ,) # up __lowercase = list(reversed(lowercase__ ) ) __lowercase = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowercase__ ): __lowercase = output_channel __lowercase = reversed_block_out_channels[i] __lowercase = i == len(lowercase__ ) - 1 __lowercase = get_up_block( lowercase__ ,num_layers=self.layers_per_block + 1 ,in_channels=lowercase__ ,out_channels=lowercase__ ,prev_output_channel=lowercase__ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=lowercase__ ,resnet_groups=lowercase__ ,attention_head_dim=lowercase__ ,temb_channels=lowercase__ ,resnet_time_scale_shift=lowercase__ ,) self.up_blocks.append(lowercase__ ) __lowercase = output_channel # out if norm_type == "spatial": __lowercase = SpatialNorm(block_out_channels[0] ,lowercase__ ) else: __lowercase = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=lowercase__ ,eps=1e-6 ) __lowercase = nn.SiLU() __lowercase = nn.Convad(block_out_channels[0] ,lowercase__ ,3 ,padding=1 ) __lowercase = False def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : Optional[Any] ,lowercase__ : List[Any]=None ): __lowercase = z __lowercase = self.conv_in(lowercase__ ) __lowercase = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowercase__ : str ): def custom_forward(*lowercase__ : List[Any] ): return module(*lowercase__ ) return custom_forward if is_torch_version('''>=''' ,'''1.11.0''' ): # middle __lowercase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,lowercase__ ,lowercase__ ,use_reentrant=lowercase__ ) __lowercase = sample.to(lowercase__ ) # up for up_block in self.up_blocks: __lowercase = torch.utils.checkpoint.checkpoint( create_custom_forward(lowercase__ ) ,lowercase__ ,lowercase__ ,use_reentrant=lowercase__ ) else: # middle __lowercase = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) ,lowercase__ ,lowercase__ ) __lowercase = sample.to(lowercase__ ) # up for up_block in self.up_blocks: __lowercase = torch.utils.checkpoint.checkpoint(create_custom_forward(lowercase__ ) ,lowercase__ ,lowercase__ ) else: # middle __lowercase = self.mid_block(lowercase__ ,lowercase__ ) __lowercase = sample.to(lowercase__ ) # up for up_block in self.up_blocks: __lowercase = up_block(lowercase__ ,lowercase__ ) # post-process if latent_embeds is None: __lowercase = self.conv_norm_out(lowercase__ ) else: __lowercase = self.conv_norm_out(lowercase__ ,lowercase__ ) __lowercase = self.conv_act(lowercase__ ) __lowercase = self.conv_out(lowercase__ ) return sample class lowercase_ (nn.Module ): """simple docstring""" def __init__( self : int ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : str ,lowercase__ : int=None ,lowercase__ : int="random" ,lowercase__ : Tuple=False ,lowercase__ : List[str]=True ): super().__init__() 
__lowercase = n_e __lowercase = vq_embed_dim __lowercase = beta __lowercase = legacy __lowercase = nn.Embedding(self.n_e ,self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e ) __lowercase = remap if self.remap is not None: self.register_buffer('''used''' ,torch.tensor(np.load(self.remap ) ) ) __lowercase = self.used.shape[0] __lowercase = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": __lowercase = self.re_embed __lowercase = self.re_embed + 1 print( F"Remapping {self.n_e} indices to {self.re_embed} indices. " F"Using {self.unknown_index} for unknown indices." ) else: __lowercase = n_e __lowercase = sane_index_shape def SCREAMING_SNAKE_CASE ( self : List[str] ,lowercase__ : List[Any] ): __lowercase = inds.shape assert len(lowercase__ ) > 1 __lowercase = inds.reshape(ishape[0] ,-1 ) __lowercase = self.used.to(lowercase__ ) __lowercase = (inds[:, :, None] == used[None, None, ...]).long() __lowercase = match.argmax(-1 ) __lowercase = match.sum(2 ) < 1 if self.unknown_index == "random": __lowercase = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device ) else: __lowercase = self.unknown_index return new.reshape(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any ): __lowercase = inds.shape assert len(lowercase__ ) > 1 __lowercase = inds.reshape(ishape[0] ,-1 ) __lowercase = self.used.to(lowercase__ ) if self.re_embed > self.used.shape[0]: # extra token __lowercase = 0 # simply set to zero __lowercase = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,lowercase__ ) return back.reshape(lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : Tuple ): # reshape z -> (batch, height, width, channel) and flatten __lowercase = z.permute(0 ,2 ,3 ,1 ).contiguous() __lowercase = z.view(-1 ,self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z __lowercase = torch.argmin(torch.cdist(lowercase__ ,self.embedding.weight ) ,dim=1 ) __lowercase = self.embedding(lowercase__ ).view(z.shape ) __lowercase = None __lowercase = None # compute loss for embedding if not self.legacy: __lowercase = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: __lowercase = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients __lowercase = z + (z_q - z).detach() # reshape back to match original input shape __lowercase = z_q.permute(0 ,3 ,1 ,2 ).contiguous() if self.remap is not None: __lowercase = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis __lowercase = self.remap_to_used(lowercase__ ) __lowercase = min_encoding_indices.reshape(-1 ,1 ) # flatten if self.sane_index_shape: __lowercase = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : List[Any] ,lowercase__ : List[str] ): # shape specifying (batch, height, width, channel) if self.remap is not None: __lowercase = indices.reshape(shape[0] ,-1 ) # add batch axis __lowercase = self.unmap_to_all(lowercase__ ) __lowercase = indices.reshape(-1 ) # flatten again # get quantized latent vectors __lowercase = self.embedding(lowercase__ ) if shape is not None: __lowercase = z_q.view(lowercase__ ) # reshape back to match original input shape __lowercase = z_q.permute(0 ,3 ,1 ,2 ).contiguous() return z_q class lowercase_ (lowerCamelCase__ ): """simple docstring""" 
def __init__( self : str ,lowercase__ : Union[str, Any] ,lowercase__ : Union[str, Any]=False ): __lowercase = parameters __lowercase , __lowercase = torch.chunk(lowercase__ ,2 ,dim=1 ) __lowercase = torch.clamp(self.logvar ,-3_0.0 ,2_0.0 ) __lowercase = deterministic __lowercase = torch.exp(0.5 * self.logvar ) __lowercase = torch.exp(self.logvar ) if self.deterministic: __lowercase = __lowercase = torch.zeros_like( self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[torch.Generator] = None ): # make sure sample is on the same device as the parameters and has same dtype __lowercase = randn_tensor( self.mean.shape ,generator=lowercase__ ,device=self.parameters.device ,dtype=self.parameters.dtype ) __lowercase = self.mean + self.std * sample return x def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : Any=None ): if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean ,2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar ,dim=[1, 2, 3] ,) def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[str] ,lowercase__ : str=[1, 2, 3] ): if self.deterministic: return torch.Tensor([0.0] ) __lowercase = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=lowercase__ ) def SCREAMING_SNAKE_CASE ( self : str ): return self.mean
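# Hedged usage sketch for the diagonal Gaussian posterior defined last above.
# In the released library this is diffusers' DiagonalGaussianDistribution; the
# exact import path varies across versions, so the one used here is an assumption.
import torch
from diffusers.models.vae import DiagonalGaussianDistribution

params = torch.randn(1, 8, 16, 16)  # chunked into mean / logvar halves along dim=1
dist = DiagonalGaussianDistribution(params)
latent = dist.sample(generator=torch.manual_seed(0))
print(latent.shape, dist.kl().shape)  # torch.Size([1, 4, 16, 16]) torch.Size([1])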
104
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( a , unittest.TestCase ): """simple docstring""" __magic_name__ :List[str] = XGLMTokenizer __magic_name__ :Any = XGLMTokenizerFast __magic_name__ :Dict = True __magic_name__ :Union[str, Any] = True def snake_case ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ :int = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = '<pad>' lowerCAmelCase__ :int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(len(__UpperCAmelCase ) , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 ) def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :List[Any] = XGLMTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase__ :int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase__ :Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) lowerCAmelCase__ :Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase ) self.assertListEqual( __UpperCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def snake_case ( self ): '''simple docstring''' return XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) def snake_case ( self ): '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(__UpperCAmelCase , f.name ) lowerCAmelCase__ :Dict = XGLMTokenizer(f.name , keep_accents=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = pickle.dumps(__UpperCAmelCase ) pickle.loads(__UpperCAmelCase ) def snake_case ( self ): '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ :Optional[Any] = self.get_tokenizer() lowerCAmelCase__ :List[str] = self.get_rust_tokenizer() lowerCAmelCase__ :Optional[Any] = 'I was born in 92000, and this is falsé.' lowerCAmelCase__ :Dict = tokenizer.tokenize(__UpperCAmelCase ) lowerCAmelCase__ :Union[str, Any] = rust_tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :int = self.get_rust_tokenizer() lowerCAmelCase__ :Dict = tokenizer.encode(__UpperCAmelCase ) lowerCAmelCase__ :Tuple = rust_tokenizer.encode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :str = 'Hello World!' lowerCAmelCase__ :Tuple = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowerCAmelCase__ :List[str] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase ) ) @slow def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = { 'input_ids': [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='facebook/xglm-564M' , padding=__UpperCAmelCase , )
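# Sketch mirroring the @slow "Hello World!" check above (needs the Hub checkpoint):
from transformers import XGLMTokenizer

tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
print(tok.encode("Hello World!"))  # per the test above: [2, 31227, 4447, 35]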
293
0
"""simple docstring""" def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ): """simple docstring""" A__ = int(UpperCamelCase__ ) # Initialize Result A__ = [] # Traverse through all denomination for denomination in reversed(UpperCamelCase__ ): # Find denominations while int(UpperCamelCase__ ) >= int(UpperCamelCase__ ): total_value -= int(UpperCamelCase__ ) answer.append(UpperCamelCase__ ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": __lowerCamelCase = [] __lowerCamelCase = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): __lowerCamelCase = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F'''Denomination {i}: ''').strip())) __lowerCamelCase = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter __lowerCamelCase = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00] __lowerCamelCase = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F'''Following is minimal change for {value}: ''') __lowerCamelCase = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
154
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): """simple docstring""" A__ = len(UpperCamelCase__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(UpperCamelCase__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , UpperCamelCase__ , UpperCamelCase__ , ) def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" A__ = [] depth_first_search([] , [] , [] , UpperCamelCase__ , UpperCamelCase__ ) # Print all the boards for board in boards: for column in board: print(UpperCamelCase__ ) print('' ) print(len(UpperCamelCase__ ) , 'solutions were found.' ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
154
1
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() UpperCamelCase_ = logging.get_logger(__name__) def lowerCamelCase_ ( _a : int ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: UpperCAmelCase_ : Optional[int] = 128 elif "12-12" in model_name: UpperCAmelCase_ : List[Any] = 12 UpperCAmelCase_ : Optional[Any] = 12 elif "14-14" in model_name: UpperCAmelCase_ : Optional[Any] = 14 UpperCAmelCase_ : int = 14 elif "16-16" in model_name: UpperCAmelCase_ : List[str] = 16 UpperCAmelCase_ : str = 16 else: raise ValueError("""Model not supported""" ) UpperCAmelCase_ : str = """huggingface/label-files""" if "speech-commands" in model_name: UpperCAmelCase_ : List[Any] = 35 UpperCAmelCase_ : Union[str, Any] = """speech-commands-v2-id2label.json""" else: UpperCAmelCase_ : str = 527 UpperCAmelCase_ : Optional[int] = """audioset-id2label.json""" UpperCAmelCase_ : Dict = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase_ : Tuple = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()} UpperCAmelCase_ : str = idalabel UpperCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase_ ( _a : str ): '''simple docstring''' if "module.v" in name: UpperCAmelCase_ : Dict = name.replace("""module.v""" , """audio_spectrogram_transformer""" ) if "cls_token" in name: UpperCAmelCase_ : List[Any] = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "dist_token" in name: UpperCAmelCase_ : Dict = name.replace("""dist_token""" , """embeddings.distillation_token""" ) if "pos_embed" in name: UpperCAmelCase_ : List[str] = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: UpperCAmelCase_ : List[str] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) # transformer blocks if "blocks" in name: UpperCAmelCase_ : Optional[int] = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: UpperCAmelCase_ : Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: UpperCAmelCase_ : Any = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: UpperCAmelCase_ : str = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: UpperCAmelCase_ : Tuple = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: UpperCAmelCase_ : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" ) # final layernorm if "audio_spectrogram_transformer.norm" in name: UpperCAmelCase_ : Union[str, Any] = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" ) # classifier head if "module.mlp_head.0" in name: UpperCAmelCase_ : int = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" ) if "module.mlp_head.1" in name: UpperCAmelCase_ : Dict = name.replace("""module.mlp_head.1""" , """classifier.dense""" ) return name def lowerCamelCase_ ( _a : Any , _a : Dict ): '''simple docstring''' for key in 
orig_state_dict.copy().keys(): UpperCAmelCase_ : Optional[Any] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: UpperCAmelCase_ : str = key.split(""".""" ) UpperCAmelCase_ : Optional[Any] = int(key_split[3] ) UpperCAmelCase_ : Optional[int] = config.hidden_size if "weight" in key: UpperCAmelCase_ : Optional[Any] = val[:dim, :] UpperCAmelCase_ : Optional[int] = val[dim : dim * 2, :] UpperCAmelCase_ : str = val[-dim:, :] else: UpperCAmelCase_ : Any = val[:dim] UpperCAmelCase_ : Optional[int] = val[dim : dim * 2] UpperCAmelCase_ : List[str] = val[-dim:] else: UpperCAmelCase_ : Dict = val return orig_state_dict def lowerCamelCase_ ( _a : int ): '''simple docstring''' UpperCAmelCase_ : Optional[Any] = [ """module.v.head.weight""", """module.v.head.bias""", """module.v.head_dist.weight""", """module.v.head_dist.bias""", ] for k in ignore_keys: state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def lowerCamelCase_ ( _a : int , _a : List[Any] , _a : Union[str, Any]=False ): '''simple docstring''' UpperCAmelCase_ : Optional[int] = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ : Dict = { """ast-finetuned-audioset-10-10-0.4593""": ( """https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.450""": ( """https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448""": ( """https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1""" ), """ast-finetuned-audioset-10-10-0.448-v2""": ( """https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1""" ), """ast-finetuned-audioset-12-12-0.447""": ( """https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1""" ), """ast-finetuned-audioset-14-14-0.443""": ( """https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1""" ), """ast-finetuned-audioset-16-16-0.442""": ( """https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1""" ), """ast-finetuned-speech-commands-v2""": ( """https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1""" ), } # load original state_dict UpperCAmelCase_ : List[str] = model_name_to_url[model_name] UpperCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="""cpu""" ) # remove some keys remove_keys(SCREAMING_SNAKE_CASE_ ) # rename some keys UpperCAmelCase_ : Optional[int] = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # load 🤗 model UpperCAmelCase_ : Any = ASTForAudioClassification(SCREAMING_SNAKE_CASE_ ) model.eval() model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 UpperCAmelCase_ : Optional[int] = -4.2_6_7_7_3_9_3 if """speech-commands""" not in model_name else -6.8_4_5_9_7_8 UpperCAmelCase_ : Any = 4.5_6_8_9_9_7_4 if """speech-commands""" not in model_name else 5.5_6_5_4_5_2_6 UpperCAmelCase_ : Union[str, Any] = 1024 if """speech-commands""" not in model_name else 128 UpperCAmelCase_ : Dict = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ ) if "speech-commands" in model_name: UpperCAmelCase_ : Any = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" ) UpperCAmelCase_ : str = dataset[0]["""audio"""]["""array"""] else: UpperCAmelCase_ : List[str] = hf_hub_download( 
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = torchaudio.load(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ : Any = waveform.squeeze().numpy() UpperCAmelCase_ : int = feature_extractor(SCREAMING_SNAKE_CASE_ , sampling_rate=1_6000 , return_tensors="""pt""" ) # forward pass UpperCAmelCase_ : List[Any] = model(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_ : Optional[Any] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": UpperCAmelCase_ : int = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": UpperCAmelCase_ : Dict = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": UpperCAmelCase_ : Tuple = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": UpperCAmelCase_ : Optional[int] = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": UpperCAmelCase_ : Dict = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": UpperCAmelCase_ : List[Any] = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": UpperCAmelCase_ : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] ) elif model_name == "ast-finetuned-speech-commands-v2": UpperCAmelCase_ : Dict = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] ) else: raise ValueError("""Unknown model name""" ) if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ): raise ValueError("""Logits don\'t match""" ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: print("""Pushing model and feature extractor to the hub...""" ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''ast-finetuned-audioset-10-10-0.4593''', type=str, help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) UpperCamelCase_ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
345
"""simple docstring""" __UpperCamelCase = 0 # The first color of the flag. __UpperCamelCase = 1 # The second color of the flag. __UpperCamelCase = 2 # The third color of the flag. __UpperCamelCase = (red, white, blue) def lowercase (SCREAMING_SNAKE_CASE_ : list ) -> list: if not sequence: return [] if len(SCREAMING_SNAKE_CASE_ ) == 1: return list(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = len(SCREAMING_SNAKE_CASE_ ) - 1 SCREAMING_SNAKE_CASE = 0 while mid <= high: if sequence[mid] == colors[0]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = sequence[high], sequence[mid] high -= 1 else: SCREAMING_SNAKE_CASE = F'The elements inside the sequence must contains only {colors} values' raise ValueError(SCREAMING_SNAKE_CASE_ ) return sequence if __name__ == "__main__": import doctest doctest.testmod() __UpperCamelCase = input('''Enter numbers separated by commas:\n''').strip() __UpperCamelCase = [int(item.strip()) for item in user_input.split(''',''')] print(f'''{dutch_national_flag_sort(unsorted)}''')
113
0
'''simple docstring''' import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowerCamelCase ( a_ ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "CLIPImageProcessor" a = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any]=None , SCREAMING_SNAKE_CASE : List[Any]=None , **SCREAMING_SNAKE_CASE : List[str]): _A : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , SCREAMING_SNAKE_CASE , ) _A : str = kwargs.pop('feature_extractor') _A : List[str] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def __call__( self : str , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : str=None , **SCREAMING_SNAKE_CASE : Tuple): if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.') if text is not None: _A : Optional[Any] = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) if images is not None: _A : str = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) if text is not None and images is not None: _A : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE) , tensor_type=SCREAMING_SNAKE_CASE) def A ( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str]): return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) def A ( self : str , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : str): return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) @property def A ( self : str): _A : Optional[int] = self.tokenizer.model_input_names _A : Any = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def A ( self : Any): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def A ( self : str): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE , ) return self.image_processor
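# Sketch (assumes Hub access and Pillow): joint text + image preprocessing with
# the processor class above, which routes text to the tokenizer and images to
# the image processor.
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']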
227
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCAmelCase__ ( lowerCamelCase : int ,lowerCamelCase : str=0.999 ,lowerCamelCase : int="cosine" ,): if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCamelCase : Tuple ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCamelCase : List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) _A : Tuple = [] for i in range(lowerCamelCase ): _A : List[Any] = i / num_diffusion_timesteps _A : List[str] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCamelCase ) / alpha_bar_fn(lowerCamelCase ) ,lowerCamelCase ) ) return torch.tensor(lowerCamelCase ,dtype=torch.floataa ) class __lowerCamelCase ( a_ , a_ ): """simple docstring""" a = [e.name for e in KarrasDiffusionSchedulers] a = 2 @register_to_config def __init__( self : int , SCREAMING_SNAKE_CASE : int = 1000 , SCREAMING_SNAKE_CASE : float = 0.0_0085 , SCREAMING_SNAKE_CASE : float = 0.012 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[Union[np.ndarray, List[float]]] = None , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : str = "linspace" , SCREAMING_SNAKE_CASE : int = 0 , ): if trained_betas is not None: _A : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa) elif beta_schedule == "linear": _A : List[Any] = torch.linspace(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _A : Any = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , SCREAMING_SNAKE_CASE , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _A : Optional[Any] = betas_for_alpha_bar(SCREAMING_SNAKE_CASE) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}') _A : Any = 1.0 - self.betas _A : List[Any] = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def A ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any=None): if schedule_timesteps is None: _A : Dict = self.timesteps _A : List[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter) == 0: _A : Dict = 1 if len(SCREAMING_SNAKE_CASE) > 1 else 0 else: _A : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep _A : int = self._index_counter[timestep_int] return indices[pos].item() @property def A ( self : Optional[Any]): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def A ( self : List[Any] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , ): _A : Tuple = self.index_for_timestep(SCREAMING_SNAKE_CASE) if self.state_in_first_order: _A : Any = self.sigmas[step_index] else: _A : int = self.sigmas_interpol[step_index] _A : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5) return sample def A ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , ): _A : Optional[Any] = num_inference_steps _A : int = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _A : Tuple = np.linspace(0 , num_train_timesteps - 1 , SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE)[::-1].copy() elif self.config.timestep_spacing == "leading": _A : Optional[Any] = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : int = (np.arange(0 , SCREAMING_SNAKE_CASE) * step_ratio).round()[::-1].copy().astype(SCREAMING_SNAKE_CASE) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _A : List[str] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _A : str = (np.arange(SCREAMING_SNAKE_CASE , 0 , -step_ratio)).round().copy().astype(SCREAMING_SNAKE_CASE) timesteps -= 1 else: raise ValueError( F'{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.') _A : List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) _A : Optional[int] = torch.from_numpy(np.log(SCREAMING_SNAKE_CASE)).to(SCREAMING_SNAKE_CASE) _A : str = np.interp(SCREAMING_SNAKE_CASE , np.arange(0 , len(SCREAMING_SNAKE_CASE)) , SCREAMING_SNAKE_CASE) _A : str = np.concatenate([sigmas, [0.0]]).astype(np.floataa) _A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(device=SCREAMING_SNAKE_CASE) # interpolate sigmas _A : Optional[int] = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp() _A : Any = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) _A : List[Any] = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) if str(SCREAMING_SNAKE_CASE).startswith('mps'): # mps does not support float64 _A : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=torch.floataa) else: _A : Dict = torch.from_numpy(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) # interpolate timesteps _A : Optional[int] = self.sigma_to_t(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE , dtype=timesteps.dtype) _A : Union[str, Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten() _A : Optional[Any] = torch.cat([timesteps[:1], interleaved_timesteps]) _A : str = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _A : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE) def A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]): # get log sigma _A : Dict = sigma.log() # get distribution _A : Any = log_sigma - self.log_sigmas[:, None] # get sigmas range _A : Tuple = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) _A : Union[str, Any] = low_idx + 1 _A : Dict = self.log_sigmas[low_idx] _A : List[Any] = self.log_sigmas[high_idx] # interpolate sigmas _A : Dict = (low - log_sigma) / (low - high) _A : Union[str, Any] = w.clamp(0 , 1) # transform interpolation to time range _A : int = (1 - w) * low_idx + w * high_idx _A : Any = t.view(sigma.shape) return t @property def A ( self : Any): return self.sample is None def A ( self : int , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : Union[float, torch.FloatTensor] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, np.ndarray] , SCREAMING_SNAKE_CASE : bool = True , ): _A : Optional[int] = self.index_for_timestep(SCREAMING_SNAKE_CASE) # advance index counter by 1 _A : Dict = timestep.cpu().item() if torch.is_tensor(SCREAMING_SNAKE_CASE) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _A : Tuple = self.sigmas[step_index] _A : Dict = self.sigmas_interpol[step_index + 1] _A : Union[str, Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method _A : int = self.sigmas[step_index - 1] _A : Union[str, Any] = self.sigmas_interpol[step_index] _A : Dict = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _A : List[Any] = 0 _A : Dict = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _A : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol _A : Tuple = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _A : Any = sigma_hat if self.state_in_first_order else sigma_interpol _A : Union[str, Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`') if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _A : int = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _A : str = sigma_interpol - sigma_hat # store for 2nd order step _A : List[str] = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order _A : List[Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep _A : Optional[int] = sigma_next - sigma_hat _A : str = self.sample _A : Any = None _A : Tuple = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE) def A ( self : Any , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples _A : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(SCREAMING_SNAKE_CASE): # mps does not support float64 _A : Any = self.timesteps.to(original_samples.device , dtype=torch.floataa) _A : List[str] = timesteps.to(original_samples.device , dtype=torch.floataa) else: _A : str = self.timesteps.to(original_samples.device) _A : str = timesteps.to(original_samples.device) _A : int = [self.index_for_timestep(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) for t in timesteps] _A : Tuple = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): _A : List[Any] = sigma.unsqueeze(-1) _A : Dict = original_samples + noise * sigma return noisy_samples def __len__( self : List[Any]): return self.config.num_train_timesteps
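The log-space sigma interpolation used by this scheduler can be checked in isolation; a minimal sketch with made-up sigma values (not a real model's schedule):

import torch

# Toy descending noise schedule (illustrative values only).
sigmas = torch.tensor([10.0, 6.0, 3.0, 1.5, 0.5])

# Geometric midpoints between consecutive sigmas, computed in log space exactly as above.
# Element 0 pairs sigma[0] with sigma[-1] because of the roll, so only indices 1+ are true midpoints.
sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
print(sigmas_interpol)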
227
1
"""simple docstring""" import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='''session''' ) def __lowercase ( ): snake_case_ : Optional[Any] = 10 snake_case_ : Tuple = datasets.Features( { '''tokens''': datasets.Sequence(datasets.Value('''string''' ) ), '''labels''': datasets.Sequence(datasets.ClassLabel(names=['''negative''', '''positive'''] ) ), '''answers''': datasets.Sequence( { '''text''': datasets.Value('''string''' ), '''answer_start''': datasets.Value('''int32''' ), } ), '''id''': datasets.Value('''int64''' ), } ) snake_case_ : str = datasets.Dataset.from_dict( { '''tokens''': [['''foo'''] * 5] * n, '''labels''': [[1] * 5] * n, '''answers''': [{'''answer_start''': [97], '''text''': ['''1976''']}] * 10, '''id''': list(range(_a ) ), } , features=_a , ) return dataset @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): snake_case_ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''file.arrow''' ) dataset.map(cache_file_name=_a ) return filename # FILE_CONTENT + files lowercase__ : Optional[Any] = '''\\n Text data.\n Second line of data.''' @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt''' snake_case_ : int = FILE_CONTENT with open(_a , '''w''' ) as f: f.write(_a ) return filename @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): import bza snake_case_ : Any = tmp_path_factory.mktemp('''data''' ) / '''file.txt.bz2''' snake_case_ : List[Any] = bytes(_a , '''utf-8''' ) with bza.open(_a , '''wb''' ) as f: f.write(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): import gzip snake_case_ : List[str] = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' ) snake_case_ : Tuple = bytes(_a , '''utf-8''' ) with gzip.open(_a , '''wb''' ) as f: f.write(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): if datasets.config.LZ4_AVAILABLE: import lza.frame snake_case_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.txt.lz4''' snake_case_ : Any = bytes(_a , '''utf-8''' ) with lza.frame.open(_a , '''wb''' ) as f: f.write(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): if datasets.config.PY7ZR_AVAILABLE: import pyazr snake_case_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.7z''' with pyazr.SevenZipFile(_a , '''w''' ) as archive: archive.write(_a , arcname=os.path.basename(_a ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): import tarfile snake_case_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar''' with tarfile.TarFile(_a , '''w''' ) as f: f.add(_a , arcname=os.path.basename(_a ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): import lzma snake_case_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz''' snake_case_ : Optional[int] = bytes(_a , '''utf-8''' ) with lzma.open(_a , '''wb''' ) as f: f.write(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): import zipfile snake_case_ : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.basename(_a ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): if 
datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd snake_case_ : int = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst''' snake_case_ : Any = bytes(_a , '''utf-8''' ) with zstd.open(_a , '''wb''' ) as f: f.write(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''file.xml''' snake_case_ : Any = textwrap.dedent( '''\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>''' ) with open(_a , '''w''' ) as f: f.write(_a ) return filename lowercase__ : Optional[Any] = [ {'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0}, {'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0}, {'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0}, {'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0}, ] lowercase__ : Optional[int] = [ {'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0}, {'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0}, ] lowercase__ : Optional[int] = { '''col_1''': ['''0''', '''1''', '''2''', '''3'''], '''col_2''': [0, 1, 2, 3], '''col_3''': [0.0, 1.0, 2.0, 3.0], } lowercase__ : Any = [ {'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0}, {'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1}, ] lowercase__ : str = [ {'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0}, {'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0}, {'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0}, {'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0}, ] @pytest.fixture(scope='''session''' ) def __lowercase ( ): return DATA_DICT_OF_LISTS @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : str = datasets.Dataset.from_dict(_a ) snake_case_ : Union[str, Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' ) dataset.map(cache_file_name=_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.sqlite''' ) with contextlib.closing(sqlitea.connect(_a ) ) as con: snake_case_ : Dict = con.cursor() cur.execute('''CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)''' ) for item in DATA: cur.execute('''INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)''' , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' ) with open(_a , '''w''' , newline='''''' ) as f: snake_case_ : Any = csv.DictWriter(_a , fieldnames=['''col_1''', '''col_2''', '''col_3'''] ) writer.writeheader() for item in DATA: writer.writerow(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' ) with open(_a , '''w''' , 
newline='''''' ) as f: snake_case_ : Tuple = csv.DictWriter(_a , fieldnames=['''col_1''', '''col_2''', '''col_3'''] ) writer.writeheader() for item in DATA: writer.writerow(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): import bza snake_case_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.bz2''' with open(_a , '''rb''' ) as f: snake_case_ : Dict = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(_a , '''wb''' ) as f: f.write(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.basename(_a ) ) f.write(_a , arcname=os.path.basename(_a ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : List[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) ) f.write(_a , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : int = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.join('''main_dir''' , os.path.basename(_a ) ) ) f.write(_a , arcname=os.path.join('''main_dir''' , os.path.basename(_a ) ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.parquet''' ) snake_case_ : Any = pa.schema( { '''col_1''': pa.string(), '''col_2''': pa.intaa(), '''col_3''': pa.floataa(), } ) with open(_a , '''wb''' ) as f: snake_case_ : str = pq.ParquetWriter(_a , schema=_a ) snake_case_ : Union[str, Any] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_a ) )] for k in DATA[0]} , schema=_a ) writer.write_table(_a ) writer.close() return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : str = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' ) snake_case_ : Any = {'''data''': DATA} with open(_a , '''w''' ) as f: json.dump(_a , _a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Tuple = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' ) snake_case_ : int = {'''data''': DATA_DICT_OF_LISTS} with open(_a , '''w''' ) as f: json.dump(_a , _a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' ) with open(_a , '''w''' ) as f: for item in DATA: f.write(json.dumps(_a ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' ) with open(_a , '''w''' ) as f: for item in DATA: f.write(json.dumps(_a ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' ) with open(_a , '''w''' ) as f: for item in DATA_312: f.write(json.dumps(_a ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' ) with open(_a , 
'''w''' ) as f: for item in DATA_STR: f.write(json.dumps(_a ) + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): import gzip snake_case_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' ) with open(_a , '''rb''' ) as orig_file: with gzip.open(_a , '''wb''' ) as zipped_file: zipped_file.writelines(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): import gzip snake_case_ : List[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' ) with open(_a , '''rb''' ) as orig_file: with gzip.open(_a , '''wb''' ) as zipped_file: zipped_file.writelines(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.basename(_a ) ) f.write(_a , arcname=os.path.basename(_a ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a , _a ): snake_case_ : Dict = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.join('''nested''' , os.path.basename(_a ) ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.join('''main_dir''' , os.path.basename(_a ) ) ) f.write(_a , arcname=os.path.join('''main_dir''' , os.path.basename(_a ) ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : List[str] = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar''' with tarfile.TarFile(_a , '''w''' ) as f: f.add(_a , arcname=os.path.basename(_a ) ) f.add(_a , arcname=os.path.basename(_a ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a , _a ): snake_case_ : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar''' with tarfile.TarFile(_a , '''w''' ) as f: f.add(_a , arcname=os.path.join('''nested''' , os.path.basename(_a ) ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Tuple = ['''0''', '''1''', '''2''', '''3'''] snake_case_ : Dict = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' ) with open(_a , '''w''' ) as f: for item in data: f.write(item + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : Dict = ['''0''', '''1''', '''2''', '''3'''] snake_case_ : Optional[int] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' ) with open(_a , '''w''' ) as f: for item in data: f.write(item + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : List[str] = ['''0''', '''1''', '''2''', '''3'''] snake_case_ : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc''' with open(_a , '''w''' ) as f: for item in data: f.write(item + '''\n''' ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.basename(_a ) ) f.write(_a , arcname=os.path.basename(_a ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : 
Any = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.join('''main_dir''' , os.path.basename(_a ) ) ) f.write(_a , arcname=os.path.join('''main_dir''' , os.path.basename(_a ) ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a , _a ): snake_case_ : Optional[int] = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.basename('''unsupported.ext''' ) ) f.write(_a , arcname=os.path.basename('''unsupported_2.ext''' ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : int = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] ) snake_case_ : Optional[Any] = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' ) with open(_a , '''w''' , encoding='''utf-8''' ) as f: f.write(_a ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( ): return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' ) @pytest.fixture(scope='''session''' ) def __lowercase ( ): return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' ) @pytest.fixture(scope='''session''' ) def __lowercase ( _a , _a ): snake_case_ : Tuple = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip''' with zipfile.ZipFile(_a , '''w''' ) as f: f.write(_a , arcname=os.path.basename(_a ) ) f.write(_a , arcname=os.path.basename(_a ).replace('''.jpg''' , '''2.jpg''' ) ) return path @pytest.fixture(scope='''session''' ) def __lowercase ( _a ): snake_case_ : str = tmp_path_factory.mktemp('''data_dir''' ) (data_dir / "subdir").mkdir() with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f: f.write('''foo\n''' * 10 ) with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f: f.write('''bar\n''' * 10 ) # hidden file with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f: f.write('''bar\n''' * 10 ) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f: f.write('''foo\n''' * 10 ) with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f: f.write('''bar\n''' * 10 ) return data_dir
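The fixture names in this file were rewritten, but a consumer test would request one by argument name; a hypothetical example, assuming a fixture named `csv_path` returning the four-row CSV built above:

import csv


def test_csv_fixture(csv_path):
    # pytest injects the session-scoped fixture by matching the argument name.
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert [row["col_1"] for row in rows] == ["0", "1", "2", "3"]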
264
"""simple docstring""" def UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ) ->bool: """simple docstring""" a_ = set() # Replace all the whitespace in our sentence a_ = input_str.replace(" " , "" ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(UpperCAmelCase ) == 26 def UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ) ->bool: """simple docstring""" a_ = [False] * 26 for char in input_str: if char.islower(): a_ = True elif char.isupper(): a_ = True return all(UpperCAmelCase ) def UpperCamelCase ( UpperCAmelCase = "The quick brown fox jumps over the lazy dog" , ) ->bool: """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def UpperCamelCase ( ) ->None: """simple docstring""" from timeit import timeit a_ = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest" print(timeit("is_pangram()" , setup=UpperCAmelCase ) ) print(timeit("is_pangram_faster()" , setup=UpperCAmelCase ) ) print(timeit("is_pangram_fastest()" , setup=UpperCAmelCase ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
243
0
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class __a : def __init__( self ): _lowerCamelCase = {} def snake_case_ ( self , a__ ): _lowerCamelCase = {} def snake_case_ ( self , a__ , a__ , a__ ): if nodea not in self.connections: self.add_node(__snake_case ) if nodea not in self.connections: self.add_node(__snake_case ) _lowerCamelCase = probability def snake_case_ ( self ): return list(self.connections ) def snake_case_ ( self , a__ ): _lowerCamelCase = 0 _lowerCamelCase = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] , snake_case : str , snake_case : List[str] )-> Union[str, Any]: _lowerCamelCase = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(_A , _A , _A ) _lowerCamelCase = Counter(graph.get_nodes() ) _lowerCamelCase = start for _ in range(_A ): _lowerCamelCase = graph.transition(_A ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
357
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": A_ : List[Any] =argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) A_ : List[str] =parser.parse_args() A_ : Any =download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
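A hypothetical invocation of this conversion script (the script and checkpoint file names are placeholders, not taken from this document):

python convert_original_stable_diffusion_to_diffusers.py \
    --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
    --original_config_file ./v1-inference.yaml \
    --scheduler_type pndm \
    --extract_ema \
    --half \
    --dump_path ./converted-pipeline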
80
0
'''simple docstring''' from typing import Dict, List, Optional from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _UpperCamelCase = logging.get_logger(__name__) _UpperCamelCase = { '''nielsr/canine-s''': 2048, } # Unicode defines 1,114,112 total “codepoints” _UpperCamelCase = 111_4112 # Below: Constants defining canonical codepoints for special, pseudo-characters. # Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py _UpperCamelCase = 0 _UpperCamelCase = 0xe000 _UpperCamelCase = 0xe001 _UpperCamelCase = 0xe002 _UpperCamelCase = 0xe003 _UpperCamelCase = 0xe004 # Maps special codepoints to human-readable names. _UpperCamelCase = { # Special symbols are represented using codepoints values that are valid, # but designated as "Private Use", meaning that they will never be assigned # characters by the Unicode Consortium, and are thus safe for use here. # # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly # excluded and should fail with a hard error. CLS: "[CLS]", SEP: "[SEP]", BOS: "[BOS]", MASK: "[MASK]", PAD: "[PAD]", RESERVED: "[RESERVED]", } # Maps special codepoint human-readable names to their codepoint values. _UpperCamelCase = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()} class _A ( __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=chr(__UpperCAmelCase ) , __UpperCAmelCase=False , __UpperCAmelCase=2_048 , **__UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token __UpperCAmelCase : List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token __UpperCAmelCase : Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token __UpperCAmelCase : Union[str, Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token __UpperCAmelCase : List[str] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. include the space before it __UpperCAmelCase : List[Any] = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , model_max_length=__UpperCAmelCase , **__UpperCAmelCase , ) # Creates a mapping for looking up the IDs of special symbols. __UpperCAmelCase : Dict[str, int] = {} for codepoint, name in SPECIAL_CODEPOINTS.items(): __UpperCAmelCase : List[Any] = codepoint # Creates a mapping for looking up the string forms of special symbol IDs. 
__UpperCAmelCase : Dict[int, str] = { codepoint: name for name, codepoint in self._special_codepoints.items() } __UpperCAmelCase : Optional[Any] = UNICODE_VOCAB_SIZE __UpperCAmelCase : Dict = len(self._special_codepoints ) @property def __A ( self ) -> int: '''simple docstring''' return self._unicode_vocab_size def __A ( self , __UpperCAmelCase ) -> List[str]: '''simple docstring''' return list(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase ) -> int: '''simple docstring''' try: return ord(__UpperCAmelCase ) except TypeError: raise ValueError(f'invalid token: \'{token}\'' ) def __A ( self , __UpperCAmelCase ) -> str: '''simple docstring''' try: if index in SPECIAL_CODEPOINTS: return SPECIAL_CODEPOINTS[index] return chr(__UpperCAmelCase ) except TypeError: raise ValueError(f'invalid id: {index}' ) def __A ( self , __UpperCAmelCase ) -> Union[str, Any]: '''simple docstring''' return "".join(__UpperCAmelCase ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [self.sep_token_id] __UpperCAmelCase : Dict = [self.cls_token_id] __UpperCAmelCase : str = cls + token_ids_a + sep if token_ids_a is not None: result += token_ids_a + sep return result def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = [1] + ([0] * len(__UpperCAmelCase )) + [1] if token_ids_a is not None: result += ([0] * len(__UpperCAmelCase )) + [1] return result def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = [self.sep_token_id] __UpperCAmelCase : Optional[int] = [self.cls_token_id] __UpperCAmelCase : Union[str, Any] = len(cls + token_ids_a + sep ) * [0] if token_ids_a is not None: result += len(token_ids_a + sep ) * [1] return result def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[str]: '''simple docstring''' return ()
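Independent of the class above, the core idea is that token ids are plain Unicode codepoints; a minimal sketch in standalone Python:

text = "hello"
ids = [ord(char) for char in text]            # tokenization: characters -> codepoints
assert "".join(chr(i) for i in ids) == text   # detokenization is the exact inverse
print(ids)  # [104, 101, 108, 108, 111]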
254
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
254
1
"""simple docstring""" import os def __a ( ): UpperCAmelCase_ : int = os.path.dirname(os.path.realpath(A__ ) ) UpperCAmelCase_ : str = os.path.join(A__, "triangle.txt" ) with open(A__ ) as f: UpperCAmelCase_ : Any = f.readlines() UpperCAmelCase_ : Union[str, Any] = [] for line in triangle: UpperCAmelCase_ : str = [] for number in line.strip().split(" " ): numbers_from_line.append(int(A__ ) ) a.append(A__ ) for i in range(1, len(A__ ) ): for j in range(len(a[i] ) ): UpperCAmelCase_ : List[Any] = a[i - 1][j] if j != len(a[i - 1] ) else 0 UpperCAmelCase_ : Any = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(A__, A__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
357
"""simple docstring""" import datasets _a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' _a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' _a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def __a ( __lowerCamelCase, __lowerCamelCase ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
23
0
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="resnet50" , __lowerCAmelCase=3 , __lowerCAmelCase=3_2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = out_indices if out_indices is not None else [4] lowerCamelCase__ = stage_names lowerCamelCase__ = out_features lowerCamelCase__ = backbone lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = num_channels lowerCamelCase__ = use_pretrained_backbone lowerCamelCase__ = is_training def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = self.get_config() return config, pixel_values def __lowerCamelCase ( self ): '''simple docstring''' return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TimmBackbone(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() with torch.no_grad(): lowerCamelCase__ = model(__lowerCAmelCase ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch @require_timm class __A ( a_ , a_ , a_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (TimmBackbone,) if is_torch_available() else () lowerCAmelCase_ = {"feature-extraction": TimmBackbone} if is_torch_available() else {} lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TimmBackboneModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = """resnet18""" lowerCamelCase__ = """microsoft/resnet-18""" lowerCamelCase__ = 
AutoBackbone.from_pretrained(__lowerCAmelCase , use_timm_backbone=__lowerCAmelCase ) lowerCamelCase__ = AutoBackbone.from_pretrained(__lowerCAmelCase ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) lowerCamelCase__ = AutoBackbone.from_pretrained(__lowerCAmelCase , use_timm_backbone=__lowerCAmelCase , out_indices=[1, 2, 3] ) lowerCamelCase__ = AutoBackbone.from_pretrained(__lowerCAmelCase , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Safetensors is not supported by timm.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__lowerCAmelCase ) lowerCamelCase__ = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __lowerCAmelCase ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase__ = True lowerCamelCase__ = self.has_attentions # no need to test all models as different heads yield the same functionality lowerCamelCase__ = self.all_model_classes[0] lowerCamelCase__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) lowerCamelCase__ = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) lowerCamelCase__ = model(**__lowerCAmelCase ) lowerCamelCase__ = outputs[0][-1] # Encoder-/Decoder-only models lowerCamelCase__ = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: lowerCamelCase__ = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__lowerCAmelCase ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(**__lowerCAmelCase ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None lowerCamelCase__ = copy.deepcopy(__lowerCAmelCase ) lowerCamelCase__ = None lowerCamelCase__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(**__lowerCAmelCase ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights lowerCamelCase__ = copy.deepcopy(__lowerCAmelCase ) lowerCamelCase__ = False lowerCamelCase__ = model_class(__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() lowerCamelCase__ = model(**__lowerCAmelCase )
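A short sketch of the loading pattern these tests exercise, assuming the checkpoints are reachable:

import torch
from transformers import AutoBackbone

# Load a timm checkpoint through the TimmBackbone wrapper, as in the test above.
backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
pixel_values = torch.randn(1, 3, 224, 224)
outputs = backbone(pixel_values)
print([feature_map.shape for feature_map in outputs.feature_maps])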
209
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL _A = logging.get_logger(__name__) def lowercase_ ( __UpperCAmelCase ) -> List[List[ImageInput]]: if isinstance(__UpperCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(__UpperCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(__UpperCAmelCase ): return [[videos]] raise ValueError(f"""Could not make batched video from {videos}""" ) class _lowerCamelCase ( a_ ): _lowerCamelCase :Optional[int] = ["pixel_values"] def __init__( self : Dict , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 2_55 , UpperCamelCase : bool = True , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , **UpperCamelCase : Dict , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase ) lowerCAmelCase__ : int = size if size is not None else {"""shortest_edge""": 2_56} lowerCAmelCase__ : Union[str, Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} lowerCAmelCase__ : List[str] = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) lowerCAmelCase__ : Union[str, Any] = do_resize lowerCAmelCase__ : str = size lowerCAmelCase__ : str = do_center_crop lowerCAmelCase__ : Tuple = crop_size lowerCAmelCase__ : Union[str, Any] = resample lowerCAmelCase__ : Any = do_rescale lowerCAmelCase__ : Union[str, Any] = rescale_factor lowerCAmelCase__ : Dict = offset lowerCAmelCase__ : Optional[int] = do_normalize lowerCAmelCase__ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase__ : Tuple = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCAmelCase ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[Any] , ) -> np.ndarray: """simple docstring""" lowerCAmelCase__ : List[Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" in size: lowerCAmelCase__ : int = get_resize_output_image_size(UpperCamelCase , size["""shortest_edge"""] , default_to_square=UpperCamelCase ) elif "height" in size and "width" in size: lowerCAmelCase__ : Union[str, Any] = (size["""height"""], size["""width"""]) else: raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. 
Got {size.keys()}""" ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def _lowerCAmelCase ( self : Tuple , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[int] , ) -> np.ndarray: """simple docstring""" lowerCAmelCase__ : int = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def _lowerCAmelCase ( self : Dict , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Union[str, Any] , ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = image.astype(np.floataa ) if offset: lowerCAmelCase__ : Tuple = image - (scale / 2) return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def _lowerCAmelCase ( self : Dict , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[int] , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray: """simple docstring""" if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) if offset and not do_rescale: raise ValueError("""For offset, do_rescale must also be set to True.""" ) # All transformations expect numpy arrays. 
lowerCAmelCase__ : Optional[Any] = to_numpy_array(UpperCamelCase ) if do_resize: lowerCAmelCase__ : List[str] = self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) if do_center_crop: lowerCAmelCase__ : List[str] = self.center_crop(UpperCamelCase , size=UpperCamelCase ) if do_rescale: lowerCAmelCase__ : Optional[int] = self.rescale(image=UpperCamelCase , scale=UpperCamelCase , offset=UpperCamelCase ) if do_normalize: lowerCAmelCase__ : Tuple = self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) lowerCAmelCase__ : List[str] = to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) return image def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase : Dict , ) -> PIL.Image.Image: """simple docstring""" lowerCAmelCase__ : List[str] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ : Union[str, Any] = resample if resample is not None else self.resample lowerCAmelCase__ : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ : Dict = offset if offset is not None else self.offset lowerCAmelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ : Union[str, Any] = image_std if image_std is not None else self.image_std lowerCAmelCase__ : List[Any] = size if size is not None else self.size lowerCAmelCase__ : Tuple = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) lowerCAmelCase__ : int = make_batched(UpperCamelCase ) lowerCAmelCase__ : str = [ [ self._preprocess_image( image=UpperCamelCase , do_resize=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , do_center_crop=UpperCamelCase , crop_size=UpperCamelCase , do_rescale=UpperCamelCase , rescale_factor=UpperCamelCase , offset=UpperCamelCase , do_normalize=UpperCamelCase , image_mean=UpperCamelCase , image_std=UpperCamelCase , data_format=UpperCamelCase , ) for img in video ] for video in videos ] lowerCAmelCase__ : Dict = {"""pixel_values""": videos} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
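A usage sketch for the processor above; `VideoProcessor` is a stand-in name, since the original class name was rewritten:

import numpy as np

# Two videos of three RGB frames each, as nested lists of HxWxC arrays.
videos = [
    [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(3)]
    for _ in range(2)
]

processor = VideoProcessor()  # stand-in for the class defined above
batch = processor(videos, return_tensors="np")
print(batch["pixel_values"].shape)  # (2, 3, 3, 224, 224) with the default 224x224 crop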
242
0
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler UpperCAmelCase =16 UpperCAmelCase =32 def _A ( _a : Accelerator , _a : int = 1_6 , _a : str = "bert-base-cased" ): """simple docstring""" A = AutoTokenizer.from_pretrained(_a ) A = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(_a : int ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_a , max_length=_a ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A = datasets.map( _a , batched=_a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=_a ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_a : Optional[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_a , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" ) return tokenizer.pad(_a , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. A = DataLoader( tokenized_datasets["""train"""] , shuffle=_a , collate_fn=_a , batch_size=_a ) A = DataLoader( tokenized_datasets["""validation"""] , shuffle=_a , collate_fn=_a , batch_size=_a ) return train_dataloader, eval_dataloader def _A ( _a : Optional[int] , _a : Optional[int] , _a : str , _a : Union[str, Any] ): """simple docstring""" model.eval() A = 0 for step, batch in enumerate(_a ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A = model(**_a ) A = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times A , A = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(_a ) - 1: A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=_a , references=_a , ) A = metric.compute() return eval_metric["accuracy"] def _A ( _a : List[str] , _a : Optional[int] ): """simple docstring""" A = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config["""lr"""] A = int(config["""num_epochs"""] ) A = int(config["""seed"""] ) A = int(config["""batch_size"""] ) A = args.model_name_or_path set_seed(_a ) A , A = get_dataloaders(_a , _a , _a ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained(_a , return_dict=_a ) # Instantiate optimizer A = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A = optimizer_cls(params=model.parameters() , lr=_a ) if accelerator.state.deepspeed_plugin is not None: A = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: A = 1 A = (len(_a ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A = get_linear_schedule_with_warmup( optimizer=_a , num_warmup_steps=0 , num_training_steps=_a , ) else: A = DummyScheduler(_a , total_num_steps=_a , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
A , A , A , A , A = accelerator.prepare( _a , _a , _a , _a , _a ) # We need to keep track of how many total steps we have iterated over A = 0 # We also need to keep track of the stating epoch so files are named properly A = 0 A = evaluate.load("""glue""" , """mrpc""" ) A = num_epochs if args.partial_train_epoch is not None: A = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) A = args.resume_from_checkpoint.split("""epoch_""" )[1] A = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break A = int(_a ) + 1 A = evaluation_loop(_a , _a , _a , _a ) accelerator.print("""resumed checkpoint performance:""" , _a ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f: A = json.load(_a ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model A = {} for epoch in range(_a , _a ): model.train() for step, batch in enumerate(_a ): A = model(**_a ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(_a ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 A = f'epoch_{epoch}' A = os.path.join(args.output_dir , _a ) accelerator.save_state(_a ) A = evaluation_loop(_a , _a , _a , _a ) A = accuracy A = lr_scheduler.get_lr()[0] A = optimizer.param_groups[0]["""lr"""] A = epoch A = overall_step accelerator.print(f'epoch {epoch}:' , _a ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f: json.dump(_a , _a ) def _A ( ): """simple docstring""" A = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=_a , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_a , ) parser.add_argument( """--output_dir""" , type=_a , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=_a , default=_a , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=_a , default=_a , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=_a , default=2 , help="""Number of train epochs.""" , ) A = parser.parse_args() A = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 4_2, """batch_size""": 1_6} training_function(_a , _a ) if __name__ == "__main__": main()
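# A minimal sketch of the save/resume pattern exercised above, reduced to its
# core. The tiny model, optimizer, and the "my_checkpoint" directory name are
# illustrative assumptions, not part of the script itself.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("my_checkpoint")  # captures model, optimizer and RNG states
accelerator.load_state("my_checkpoint")  # restores them, as done on --resume_from_checkpoint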
"""simple docstring""" from __future__ import annotations import math from collections.abc import Callable def _A ( _a : Callable[[int | float], int | float] , _a : int | float , _a : int | float , _a : int = 1_0_0 , ): """simple docstring""" A = x_start A = fnc(_a ) A = 0.0 for _ in range(_a ): # Approximates curve as a sequence of linear lines and sums their length A = (x_end - x_start) / steps + xa A = fnc(_a ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step A = xa A = fxa return length if __name__ == "__main__": def _A ( _a : Tuple ): """simple docstring""" return math.sin(1_0 * x ) print("f(x) = sin(10 * x)") print("The length of the curve from x = -10 to x = 10 is:") UpperCAmelCase =10 while i <= 100_000: print(f"""With {i} steps: {line_length(f, -10, 10, i)}""") i *= 10
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: Dict ={ 'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: Tuple =['AlbertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: str =['AlbertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: Optional[int] =[ 'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'AlbertForMaskedLM', 'AlbertForMultipleChoice', 'AlbertForPreTraining', 'AlbertForQuestionAnswering', 'AlbertForSequenceClassification', 'AlbertForTokenClassification', 'AlbertModel', 'AlbertPreTrainedModel', 'load_tf_weights_in_albert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: List[str] =[ 'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAlbertForMaskedLM', 'TFAlbertForMultipleChoice', 'TFAlbertForPreTraining', 'TFAlbertForQuestionAnswering', 'TFAlbertForSequenceClassification', 'TFAlbertForTokenClassification', 'TFAlbertMainLayer', 'TFAlbertModel', 'TFAlbertPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_: Optional[Any] =[ 'FlaxAlbertForMaskedLM', 'FlaxAlbertForMultipleChoice', 'FlaxAlbertForPreTraining', 'FlaxAlbertForQuestionAnswering', 'FlaxAlbertForSequenceClassification', 'FlaxAlbertForTokenClassification', 'FlaxAlbertModel', 'FlaxAlbertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, 
FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_: Tuple =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
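# A short demonstration of what the _LazyModule wiring above buys (a sketch,
# assuming transformers is installed): importing the package only registers
# names, and heavy submodules load on first attribute access.
import sys

import transformers.models.albert as albert

print("modeling_albert loaded?", "transformers.models.albert.modeling_albert" in sys.modules)  # typically False
_ = albert.AlbertConfig  # touching a name triggers the real submodule import
print("configuration_albert loaded?", "transformers.models.albert.configuration_albert" in sys.modules)  # now True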
'''simple docstring''' import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) SCREAMING_SNAKE_CASE_: str =logging.getLogger(__name__) @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : str a__ : str a__ : Optional[str] = None a__ : Optional[str] = None a__ : Optional[str] = None @dataclass(frozen=UpperCamelCase__ ) class __A : a__ : List[int] a__ : Optional[List[int]] = None a__ : Optional[List[int]] = None a__ : Optional[Union[int, float]] = None a__ : Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class __A ( UpperCamelCase__ ): a__ : List[InputFeatures] def __init__(self : Any , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = None , __a : Dict=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = os.path.join( __a , "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , ) UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. UpperCAmelCase_ = cached_features_file + ".lock" with FileLock(__a ): if os.path.exists(__a ) and not overwrite_cache: logger.info(f"""Loading features from cached file {cached_features_file}""" ) UpperCAmelCase_ = torch.load(__a ) else: logger.info(f"""Creating features from dataset file at {data_dir}""" ) UpperCAmelCase_ = ( processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) ) logger.info("Training examples: %s" , len(__a ) ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) logger.info("Saving features into cached file %s" , __a ) torch.save(self.features , __a ) def __len__(self : List[Any] ): return len(self.features ) def __getitem__(self : Any , __a : Optional[Any] ): return self.features[i] def _lowercase (self : Union[str, Any] ): return self.label_list if is_tf_available(): import tensorflow as tf class __A : a__ : List[InputFeatures] def __init__(self : Union[str, Any] , __a : str , __a : PreTrainedTokenizer , __a : str , __a : Optional[int] = 128 , __a : Any=False , __a : bool = False , ): UpperCAmelCase_ = hans_processors[task]() UpperCAmelCase_ = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1] UpperCAmelCase_ = label_list UpperCAmelCase_ = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a ) UpperCAmelCase_ = hans_convert_examples_to_features(__a , __a , __a , __a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(__a )) ) yield ( { "example_id": 0, "input_ids": 
ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) UpperCAmelCase_ = tf.data.Dataset.from_generator( __a , ( { "example_id": tf.intaa, "input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa, }, tf.intaa, ) , ( { "example_id": tf.TensorShape([] ), "input_ids": tf.TensorShape([None, None] ), "attention_mask": tf.TensorShape([None, None] ), "token_type_ids": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def _lowercase (self : int ): return self.dataset def __len__(self : Any ): return len(self.features ) def __getitem__(self : int , __a : Union[str, Any] ): return self.features[i] def _lowercase (self : int ): return self.label_list class __A ( UpperCamelCase__ ): def _lowercase (self : List[Any] , __a : Dict ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_train_set.txt" ) ) , "train" ) def _lowercase (self : Any , __a : List[Any] ): return self._create_examples(self._read_tsv(os.path.join(__a , "heuristics_evaluation_set.txt" ) ) , "dev" ) def _lowercase (self : Any ): return ["contradiction", "entailment", "neutral"] def _lowercase (self : Union[str, Any] , __a : Optional[int] , __a : Union[str, Any] ): UpperCAmelCase_ = [] for i, line in enumerate(__a ): if i == 0: continue UpperCAmelCase_ = "%s-%s" % (set_type, line[0]) UpperCAmelCase_ = line[5] UpperCAmelCase_ = line[6] UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7] UpperCAmelCase_ = line[0] examples.append(InputExample(guid=__a , text_a=__a , text_b=__a , label=__a , pairID=__a ) ) return examples def lowerCAmelCase_ ( snake_case_ : List[InputExample] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : PreTrainedTokenizer , ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = {label: i for i, label in enumerate(snake_case_ )} UpperCAmelCase_ = [] for ex_index, example in tqdm.tqdm(enumerate(snake_case_ ) , desc="convert examples to features" ): if ex_index % 1_00_00 == 0: logger.info("Writing example %d" % (ex_index) ) UpperCAmelCase_ = tokenizer( example.text_a , example.text_b , add_special_tokens=snake_case_ , max_length=snake_case_ , padding="max_length" , truncation=snake_case_ , return_overflowing_tokens=snake_case_ , ) UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0 UpperCAmelCase_ = int(example.pairID ) features.append(InputFeatures(**snake_case_ , label=snake_case_ , pairID=snake_case_ ) ) for i, example in enumerate(examples[:5] ): logger.info("*** Example ***" ) logger.info(f"""guid: {example}""" ) logger.info(f"""features: {features[i]}""" ) return features SCREAMING_SNAKE_CASE_: int ={ 'hans': 3, } SCREAMING_SNAKE_CASE_: Any ={ 'hans': HansProcessor, }
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase: List[str] = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: int = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: Optional[Any] = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: int = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase: int = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, 
FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys UpperCAmelCase: Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import cva import numpy as np class UpperCamelCase : """simple docstring""" def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ): if k in (0.04, 0.06): _lowercase : Optional[Any] = k _lowercase : Optional[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self ): return str(self.k ) def lowerCamelCase__ ( self ,UpperCAmelCase_ ): _lowercase : List[str] = cva.imread(UpperCAmelCase_ ,0 ) _lowercase , _lowercase : Dict = img.shape _lowercase : list[list[int]] = [] _lowercase : int = img.copy() _lowercase : List[str] = cva.cvtColor(UpperCAmelCase_ ,cva.COLOR_GRAY2RGB ) _lowercase , _lowercase : Optional[Any] = np.gradient(UpperCAmelCase_ ) _lowercase : Optional[int] = dx**2 _lowercase : Optional[Any] = dy**2 _lowercase : Optional[Any] = dx * dy _lowercase : List[str] = 0.04 _lowercase : Optional[Any] = self.window_size // 2 for y in range(UpperCAmelCase_ ,h - offset ): for x in range(UpperCAmelCase_ ,w - offset ): _lowercase : Optional[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Dict = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : Union[str, Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _lowercase : int = (wxx * wyy) - (wxy**2) _lowercase : Union[str, Any] = wxx + wyy _lowercase : Union[str, Any] = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) ,0 ) color_img.itemset((y, x, 1) ,0 ) color_img.itemset((y, x, 2) ,2_55 ) return color_img, corner_list if __name__ == "__main__": UpperCAmelCase: Optional[int] = HarrisCorner(0.04, 3) UpperCAmelCase , UpperCAmelCase: List[Any] = edge_detect.detect("""path_to_image""") cva.imwrite("""detect.png""", color_img)
'''simple docstring''' # Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ = { '''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''], '''tokenization_cpmant''': ['''CpmAntTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CpmAntForCausalLM''', '''CpmAntModel''', '''CpmAntPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig from .tokenization_cpmant import CpmAntTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_cpmant import ( CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST, CpmAntForCausalLM, CpmAntModel, CpmAntPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a (possibly non-minimal) roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-form roman numeral for num."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting each numeral minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file1:
        lines = file1.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
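# Worked examples for the converters above (values checked by hand): "XIX"
# parses to 19, whose minimal form is again "XIX", while the long form
# "XIIIIIIIII" also parses to 19 but wastes 7 characters. Note the solution()
# demo above expects a p089_roman.txt file next to this script.
if __name__ == "__main__":
    assert parse_roman_numerals("XIX") == 19
    assert parse_roman_numerals("XIIIIIIIII") == 19
    assert generate_roman_numerals(19) == "XIX"
    print(len("XIIIIIIIII") - len(generate_roman_numerals(19)), "characters saved")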
def method_1(boundary, steps):
    """Extended trapezoidal rule over [boundary[0], boundary[1]]."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ... strictly inside (a, b)."""
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
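# A brief convergence check for method_1 above, assuming the f(x) = x**2
# defined in this file: the exact integral over [0, 1] is 1/3, and the error
# should shrink as the resolution grows (make_points samples only strict
# interior points, mirroring the implementation above).
if __name__ == "__main__":
    exact = 1.0 / 3.0
    for steps in (10.0, 20.0, 40.0):
        approx = method_1([0.0, 1.0], steps)
        print(f"steps={steps}: error={abs(approx - exact):.6f}")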
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Non-preemptive shortest-job-first: compute each process's waiting time."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # When processes are not completed,
    # a process whose arrival time has passed
    # and has remaining execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(burst_time, no_of_processes, waiting_time)

    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
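# A second, hand-checkable example with staggered arrivals, since the built-in
# test above uses all-zero arrival times: process 1 runs 0-3, the shorter
# remaining job (process 3) runs 3-5, process 2 runs 5-9, so the waiting
# times come out as [0, 4, 1].
if __name__ == "__main__":
    staggered_waits = calculate_waitingtime([0, 1, 2], [3, 4, 2], 3)
    print(f"waiting times with staggered arrivals: {staggered_waits}")
    assert staggered_waits == [0, 4, 1]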
"""simple docstring""" import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) UpperCAmelCase_ : List[str] = logging.getLogger(__name__) def _A () -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ : str = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=__a , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=__a , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=__a , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=__a , default='''data/dump''' , help='''The dump file prefix.''' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args() logger.info(f'Loading Tokenizer ({args.tokenizer_name})' ) if args.tokenizer_type == "bert": SCREAMING_SNAKE_CASE_ : Dict = BertTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` SCREAMING_SNAKE_CASE_ : str = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": SCREAMING_SNAKE_CASE_ : List[str] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : int = tokenizer.special_tokens_map['''cls_token'''] # `<s>` SCREAMING_SNAKE_CASE_ : Dict = tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": SCREAMING_SNAKE_CASE_ : Any = GPTaTokenizer.from_pretrained(args.tokenizer_name ) SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` SCREAMING_SNAKE_CASE_ : str = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(f'Loading text from {args.file_path}' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: SCREAMING_SNAKE_CASE_ : Any = fp.readlines() logger.info('''Start encoding''' ) logger.info(f'{len(__a )} examples to process.' ) SCREAMING_SNAKE_CASE_ : int = [] SCREAMING_SNAKE_CASE_ : int = 0 SCREAMING_SNAKE_CASE_ : Optional[int] = 1_00_00 SCREAMING_SNAKE_CASE_ : Optional[Any] = time.time() for text in data: SCREAMING_SNAKE_CASE_ : Union[str, Any] = f'{bos} {text.strip()} {sep}' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(__a , add_special_tokens=__a ) rslt.append(__a ) iter += 1 if iter % interval == 0: SCREAMING_SNAKE_CASE_ : Tuple = time.time() logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' ) SCREAMING_SNAKE_CASE_ : Tuple = time.time() logger.info('''Finished binarization''' ) logger.info(f'{len(__a )} examples processed.' ) SCREAMING_SNAKE_CASE_ : Any = f'{args.dump_file}.{args.tokenizer_name}.pickle' SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.vocab_size if vocab_size < (1 << 16): SCREAMING_SNAKE_CASE_ : Any = [np.uintaa(__a ) for d in rslt] else: SCREAMING_SNAKE_CASE_ : List[str] = [np.intaa(__a ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'Dump to {dp_file}' ) with open(__a , '''wb''' ) as handle: pickle.dump(rslt_ , __a , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : '''simple docstring''' def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : List[str]=13 , lowercase_ : int=7 , lowercase_ : Any=True , lowercase_ : str=True , lowercase_ : List[Any]=True , lowercase_ : List[Any]=True , lowercase_ : Dict=99 , lowercase_ : Union[str, Any]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : Any=37 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : List[str]=16 , lowercase_ : Any=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Optional[int]=None , lowercase_ : str=1000 , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = parent SCREAMING_SNAKE_CASE_ : Optional[Any] = batch_size SCREAMING_SNAKE_CASE_ : Optional[Any] = seq_length SCREAMING_SNAKE_CASE_ : List[Any] = is_training SCREAMING_SNAKE_CASE_ : Optional[int] = use_input_mask SCREAMING_SNAKE_CASE_ : Optional[Any] = use_token_type_ids SCREAMING_SNAKE_CASE_ : int = use_labels SCREAMING_SNAKE_CASE_ : List[Any] = vocab_size SCREAMING_SNAKE_CASE_ : List[str] = hidden_size SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE_ : List[str] = num_attention_heads SCREAMING_SNAKE_CASE_ : Tuple = intermediate_size SCREAMING_SNAKE_CASE_ : Tuple = hidden_act SCREAMING_SNAKE_CASE_ : int = hidden_dropout_prob SCREAMING_SNAKE_CASE_ : str = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE_ : Union[str, Any] = type_vocab_size SCREAMING_SNAKE_CASE_ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE_ : Any = initializer_range SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels SCREAMING_SNAKE_CASE_ : Tuple = scope SCREAMING_SNAKE_CASE_ : Optional[int] = range_bbox def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 3] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 1] SCREAMING_SNAKE_CASE_ : str = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE_ : List[str] = bbox[i, j, 2] SCREAMING_SNAKE_CASE_ : Optional[int] = bbox[i, j, 0] SCREAMING_SNAKE_CASE_ : List[str] = t SCREAMING_SNAKE_CASE_ : Tuple = None if self.use_input_mask: SCREAMING_SNAKE_CASE_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) SCREAMING_SNAKE_CASE_ : Union[str, Any] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) SCREAMING_SNAKE_CASE_ : List[str] = None 
SCREAMING_SNAKE_CASE_ : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) SCREAMING_SNAKE_CASE_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) SCREAMING_SNAKE_CASE_ : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = LiltModel(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_) SCREAMING_SNAKE_CASE_ : int = model(lowercase_ , bbox=lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _SCREAMING_SNAKE_CASE ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE_ : Optional[Any] = LiltForTokenClassification(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Tuple = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = LiltForQuestionAnswering(config=lowercase_) model.to(lowercase_) model.eval() SCREAMING_SNAKE_CASE_ : Optional[int] = model( lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ( SCREAMING_SNAKE_CASE_ ) , ) : List[str] = config_and_inputs 
SCREAMING_SNAKE_CASE_ : str = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __UpperCamelCase = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __UpperCamelCase = False __UpperCamelCase = False def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : str): '''simple docstring''' return True def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = LiltModelTester(self) SCREAMING_SNAKE_CASE_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , hidden_size=37) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): '''simple docstring''' self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE_ : Dict = type self.model_tester.create_and_check_model(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_) def _SCREAMING_SNAKE_CASE ( self : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE_ : Optional[int] = LiltModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) @require_torch @slow class lowerCAmelCase__ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[int]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''').to(lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.tensor([[1, 2]] , device=lowercase_) SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Dict = model(input_ids=lowercase_ , bbox=lowercase_) SCREAMING_SNAKE_CASE_ : str = torch.Size([1, 2, 768]) SCREAMING_SNAKE_CASE_ : Dict = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , ) self.assertTrue(outputs.last_hidden_state.shape , lowercase_) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1e-3))
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __a = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ['PLBartTokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST', 'PLBartForCausalLM', 'PLBartForConditionalGeneration', 'PLBartForSequenceClassification', 'PLBartModel', 'PLBartPreTrainedModel', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Tuple = '''time_series_transformer''' UpperCamelCase_ : Optional[Any] = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : str = "student_t" , lowerCAmelCase__ : str = "nll" , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : List[int] = [1, 2, 3, 4, 5, 6, 7] , lowerCAmelCase__ : Optional[Union[str, bool]] = "mean" , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 3_2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : str = "gelu" , lowerCAmelCase__ : int = 6_4 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : float = 0.1 , lowerCAmelCase__ : int = 1_0_0 , lowerCAmelCase__ : float = 0.02 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : Tuple , ) -> Tuple: """simple docstring""" _UpperCAmelCase : Optional[int] = prediction_length _UpperCAmelCase : Optional[Any] = context_length or prediction_length _UpperCAmelCase : Optional[Any] = distribution_output _UpperCAmelCase : Union[str, Any] = loss _UpperCAmelCase : Dict = input_size _UpperCAmelCase : int = num_time_features _UpperCAmelCase : Any = lags_sequence _UpperCAmelCase : Dict = scaling _UpperCAmelCase : Tuple = num_dynamic_real_features _UpperCAmelCase : Dict = num_static_real_features _UpperCAmelCase : Union[str, Any] = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The cardinality should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : Optional[int] = cardinality else: _UpperCAmelCase : Optional[Any] = [0] if embedding_dimension and num_static_categorical_features > 0: if len(lowerCAmelCase__ ) != num_static_categorical_features: raise ValueError( "The embedding dimension should be a list of the same length as `num_static_categorical_features`" ) _UpperCAmelCase : List[Any] = embedding_dimension else: _UpperCAmelCase : Optional[Any] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality] _UpperCAmelCase : str = num_parallel_samples # Transformer architecture configuration _UpperCAmelCase : Union[str, Any] = input_size * len(lowerCAmelCase__ ) + self._number_of_features _UpperCAmelCase : str = d_model _UpperCAmelCase : Optional[Any] = encoder_attention_heads _UpperCAmelCase : Dict = decoder_attention_heads _UpperCAmelCase : List[Any] = encoder_ffn_dim _UpperCAmelCase : str = decoder_ffn_dim 
_UpperCAmelCase : Dict = encoder_layers _UpperCAmelCase : str = decoder_layers _UpperCAmelCase : Any = dropout _UpperCAmelCase : str = attention_dropout _UpperCAmelCase : List[Any] = activation_dropout _UpperCAmelCase : Dict = encoder_layerdrop _UpperCAmelCase : Any = decoder_layerdrop _UpperCAmelCase : Optional[Any] = activation_function _UpperCAmelCase : Tuple = init_std _UpperCAmelCase : List[str] = use_cache super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def _lowerCAmelCase ( self : str ) -> int: """simple docstring""" return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
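# A small usage sketch, assuming the released TimeSeriesTransformerConfig that
# this file corresponds to; the concrete numbers are illustrative. One static
# categorical feature with cardinality 3 gets an embedding of size
# (3 + 1) // 2 = 2, which _number_of_features then adds to the time features
# and the two scaling features.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    num_time_features=1,
    num_static_categorical_features=1,
    cardinality=[3],
)
print(config.d_model, config.context_length)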
import string

# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
ENGLISH_LETTER_FREQ = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
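# A worked example for the helpers above: it prints the six most frequent
# letters of a sample sentence and its ETAOIN match score. Short samples are
# noisy, so the score is modest; it depends only on the code above.
if __name__ == "__main__":
    sample = "The quick brown fox jumps over the lazy dog"
    print("frequency order:", get_frequency_order(sample)[:6])
    print("match score:", english_freq_match_score(sample))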
import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A_ ( snake_case__ ): _lowercase : int = (DPMSolverSinglestepScheduler,) _lowercase : Optional[Any] = (('num_inference_steps', 2_5),) def UpperCAmelCase ( self : Dict , **UpperCAmelCase : List[Any] ) -> Optional[Any]: __lowerCAmelCase: Union[str, Any] = { 'num_train_timesteps': 1_0_0_0, 'beta_start': 0.0001, 'beta_end': 0.02, 'beta_schedule': 'linear', 'solver_order': 2, 'prediction_type': 'epsilon', 'thresholding': False, 'sample_max_value': 1.0, 'algorithm_type': 'dpmsolver++', 'solver_type': 'midpoint', 'lambda_min_clipped': -float('inf' ), 'variance_type': None, } config.update(**UpperCAmelCase ) return config def UpperCAmelCase ( self : str , UpperCAmelCase : List[Any]=0 , **UpperCAmelCase : str ) -> Any: __lowerCAmelCase: Optional[int] = dict(self.forward_default_kwargs ) __lowerCAmelCase: int = kwargs.pop('num_inference_steps' , UpperCAmelCase ) __lowerCAmelCase: int = self.dummy_sample __lowerCAmelCase: Union[str, Any] = 0.1 * sample __lowerCAmelCase: str = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase: Union[str, Any] = self.get_scheduler_config(**UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase ) scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residuals __lowerCAmelCase: Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase ) __lowerCAmelCase: Dict = scheduler_class.from_pretrained(UpperCAmelCase ) new_scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residuals __lowerCAmelCase: Optional[int] = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase , __lowerCAmelCase: Optional[int] = sample, sample for t in range(UpperCAmelCase , time_step + scheduler.config.solver_order + 1 ): __lowerCAmelCase: str = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample __lowerCAmelCase: str = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self : str ) -> str: pass def UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase : Any=0 , **UpperCAmelCase : Optional[int] ) -> Tuple: __lowerCAmelCase: Tuple = dict(self.forward_default_kwargs ) __lowerCAmelCase: Tuple = kwargs.pop('num_inference_steps' , UpperCAmelCase ) __lowerCAmelCase: Tuple = self.dummy_sample __lowerCAmelCase: Union[str, Any] = 0.1 * sample __lowerCAmelCase: Tuple = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: __lowerCAmelCase: Dict = self.get_scheduler_config() __lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase ) scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residuals (must be after setting timesteps) __lowerCAmelCase: List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase ) __lowerCAmelCase: List[str] = scheduler_class.from_pretrained(UpperCAmelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase ) # copy over dummy past residual (must be after setting 
timesteps) __lowerCAmelCase: Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] __lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample __lowerCAmelCase: Dict = new_scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def UpperCAmelCase ( self : int , UpperCAmelCase : Dict=None , **UpperCAmelCase : List[str] ) -> Union[str, Any]: if scheduler is None: __lowerCAmelCase: str = self.scheduler_classes[0] __lowerCAmelCase: int = self.get_scheduler_config(**UpperCAmelCase ) __lowerCAmelCase: Any = scheduler_class(**UpperCAmelCase ) __lowerCAmelCase: List[Any] = self.scheduler_classes[0] __lowerCAmelCase: List[str] = self.get_scheduler_config(**UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = scheduler_class(**UpperCAmelCase ) __lowerCAmelCase: List[Any] = 1_0 __lowerCAmelCase: Dict = self.dummy_model() __lowerCAmelCase: Dict = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase: Dict = model(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample return sample def UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: __lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase: Any = 5_0 __lowerCAmelCase: int = self.dummy_model() __lowerCAmelCase: List[str] = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): __lowerCAmelCase: List[Any] = model(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: List[Any] = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample __lowerCAmelCase: Optional[int] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2574 ) < 1E-3 def UpperCAmelCase ( self : Optional[int] ) -> Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCAmelCase ) def UpperCAmelCase ( self : Optional[Any] ) -> Any: # make sure that iterating over schedulers with same config names gives same results # for defaults __lowerCAmelCase: List[str] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) __lowerCAmelCase: Dict = self.full_loop(scheduler=UpperCAmelCase ) __lowerCAmelCase: Optional[Any] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 __lowerCAmelCase: Tuple = DEISMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: List[str] = DPMSolverMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: Any = UniPCMultistepScheduler.from_config(scheduler.config ) __lowerCAmelCase: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __lowerCAmelCase: Union[str, Any] = self.full_loop(scheduler=UpperCAmelCase ) __lowerCAmelCase: List[Any] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def UpperCAmelCase ( self : List[str] ) -> List[str]: self.check_over_configs(thresholding=UpperCAmelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=UpperCAmelCase , prediction_type=UpperCAmelCase , 
sample_max_value=UpperCAmelCase , algorithm_type='dpmsolver++' , solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , ) def UpperCAmelCase ( self : Any ) -> Union[str, Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCAmelCase ) def UpperCAmelCase ( self : Tuple ) -> str: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , ) __lowerCAmelCase: Dict = self.full_loop( solver_order=UpperCAmelCase , solver_type=UpperCAmelCase , prediction_type=UpperCAmelCase , algorithm_type=UpperCAmelCase , ) assert not torch.isnan(UpperCAmelCase ).any(), "Samples have nan numbers" def UpperCAmelCase ( self : Optional[Any] ) -> str: self.check_over_configs(lower_order_final=UpperCAmelCase ) self.check_over_configs(lower_order_final=UpperCAmelCase ) def UpperCAmelCase ( self : str ) -> Any: self.check_over_configs(lambda_min_clipped=-float('inf' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def UpperCAmelCase ( self : List[Any] ) -> str: self.check_over_configs(variance_type=UpperCAmelCase ) self.check_over_configs(variance_type='learned_range' ) def UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=UpperCAmelCase , time_step=0 ) def UpperCAmelCase ( self : Any ) -> int: __lowerCAmelCase: Any = self.full_loop() __lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2791 ) < 1E-3 def UpperCAmelCase ( self : Any ) -> Union[str, Any]: __lowerCAmelCase: List[str] = self.full_loop(use_karras_sigmas=UpperCAmelCase ) __lowerCAmelCase: str = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.2248 ) < 1E-3 def UpperCAmelCase ( self : Dict ) -> Optional[Any]: __lowerCAmelCase: Tuple = self.full_loop(prediction_type='v_prediction' ) __lowerCAmelCase: List[str] = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.1453 ) < 1E-3 def UpperCAmelCase ( self : str ) -> List[str]: __lowerCAmelCase: int = self.full_loop(prediction_type='v_prediction' , use_karras_sigmas=UpperCAmelCase ) __lowerCAmelCase: Tuple = torch.mean(torch.abs(UpperCAmelCase ) ) assert abs(result_mean.item() - 0.0649 ) < 1E-3 def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: __lowerCAmelCase: Any = self.scheduler_classes[0] __lowerCAmelCase: Optional[Any] = self.get_scheduler_config(thresholding=UpperCAmelCase , dynamic_thresholding_ratio=0 ) __lowerCAmelCase: List[str] = scheduler_class(**UpperCAmelCase ) __lowerCAmelCase: Optional[int] = 1_0 __lowerCAmelCase: Union[str, Any] = self.dummy_model() __lowerCAmelCase: int = self.dummy_sample_deter.half() scheduler.set_timesteps(UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): __lowerCAmelCase: Any = model(UpperCAmelCase , UpperCAmelCase ) __lowerCAmelCase: Any = scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample assert sample.dtype == torch.floataa
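# A hedged sketch of the config round-trip these tests rely on (assuming a
# recent diffusers release): the DPM-solver family shares a config format, so
# one scheduler can be rebuilt from another's config.
from diffusers import DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler

single = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
multi = DPMSolverMultistepScheduler.from_config(single.config)
print(type(multi).__name__, multi.config.num_train_timesteps)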
import json import os from datetime import date from pathlib import Path from tabulate import DataRow, TableFormat, tabulate snake_case : int = TableFormat( lineabove=None, linebelowheader=None, linebetweenrows=None, linebelow=None, headerrow=DataRow('''''', '''|''', '''|'''), datarow=DataRow('''''', '''|''', '''|'''), padding=1, with_header_hide=None, ) snake_case : List[Any] = [] snake_case : List[Any] = [] snake_case : Any = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}} snake_case : Optional[int] = [ { """type""": """header""", """text""": { """type""": """plain_text""", """text""": F"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""", """emoji""": True, }, } ] snake_case : Any = 0 for log in Path().glob('''*.log'''): snake_case : int = 0 with open(log, '''r''') as f: for line in f: snake_case : Optional[Any] = json.loads(line) if line.get('''nodeid''', '''''') != "": snake_case : Optional[Any] = line["""nodeid"""] if line.get('''duration''', None) is not None: snake_case : str = F"""{line["duration"]:.4f}""" if line.get('''outcome''', '''''') == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('''_''')[0]]) total_num_failed += 1 group_info.append([str(log), section_num_failed, failed]) snake_case : Optional[Any] = [] log.unlink() snake_case : str = """""" snake_case : int = [] if total_num_failed > 0: for name, num_failed, failed_tests in group_info: if num_failed > 0: if num_failed == 1: message += F"*{name[1:]}: {num_failed} failed test*\n" else: message += F"*{name[1:]}: {num_failed} failed tests*\n" snake_case : Optional[Any] = [] snake_case : List[str] = {} for test in failed_tests: snake_case : Dict = test[0].split('''::''') snake_case : Optional[int] = data[0].split('''/''')[-1] if data[0] not in filesafailed: snake_case : Union[str, Any] = [data[1:]] else: filesafailed[data[0]] += [data[1:]] failed_table.append(data) snake_case : Tuple = [test[0] for test in failed_table] snake_case : Optional[Any] = list(set(files)) # Count number of instances in failed_tests snake_case : Tuple = [] for file in individual_files: table.append([file, len(filesafailed[file])]) snake_case : Tuple = tabulate( table, headers=['''Test Location''', '''Num Failed'''], tablefmt=hf_table_format, stralign='''right''', ) message += F"\n```\n{failed_table}\n```" all_filesafailed.append(filesafailed) if len(message) > 30_00: snake_case : Dict = """Too many failed tests, please see the full report in the Action results.""" snake_case : Dict = len(err) + 10 snake_case : Dict = message[: 30_00 - offset] + F"""\n...\n```\n{err}""" print(F"""### {message}""") else: snake_case : Tuple = """No failed tests! 🤗""" print(F"""## {message}""") payload.append(no_error_payload) if os.environ.get('''TEST_TYPE''', '''''') != "": from slack_sdk import WebClient snake_case : int = WebClient(token=os.environ['''SLACK_API_TOKEN''']) if message != "No failed tests! 
🤗": snake_case : int = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": message, }, } payload.append(md_report) snake_case : List[Any] = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": """*For more details:*""", }, """accessory""": { """type""": """button""", """text""": { """type""": """plain_text""", """text""": """Check Action results""", """emoji""": True, }, """url""": F"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""", }, } payload.append(action_button) snake_case : Optional[int] = { """type""": """context""", """elements""": [ { """type""": """plain_text""", """text""": F"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""", } ], } payload.append(date_report) snake_case : Dict = client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload) snake_case : Union[str, Any] = response.data["""ts"""] for failed_file in all_filesafailed: for test_location, test_failures in failed_file.items(): # Keep only the first instance of the test name snake_case : Dict = """""" for i, row in enumerate(test_failures): if row[0] != test_class: snake_case : List[Any] = row[0] else: snake_case : int = """""" snake_case : List[str] = { """type""": """section""", """text""": { """type""": """mrkdwn""", """text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""", }, } client.chat_postMessage( channel='''#accelerate-ci-daily''', thread_ts=ts, blocks=[payload], )
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _snake_case ( _snake_case , unittest.TestCase ): SCREAMING_SNAKE_CASE__ = MgpstrTokenizer SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = False def SCREAMING_SNAKE_CASE__ ( self ): super().setUp() # fmt: off a :int = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on a :List[str] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) a :Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '''\n''' ) def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ): return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :str = '''tester''' a :Union[str, Any] = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass def SCREAMING_SNAKE_CASE__ ( self ): a :List[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): a :Any = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) a :str = tokenizer.encode([special_token] , add_special_tokens=_lowerCamelCase ) self.assertEqual(len(_lowerCamelCase ) , 1 ) a :Tuple = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase ) self.assertTrue(special_token not in decoded ) def SCREAMING_SNAKE_CASE__ ( self ): a :Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): a , a :Tuple = self.get_input_output_texts(_lowerCamelCase ) a :Tuple = tokenizer.tokenize(_lowerCamelCase ) a :int = tokenizer.convert_tokens_to_ids(_lowerCamelCase ) a :Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) a :Any = tokenizer.convert_ids_to_tokens(_lowerCamelCase ) self.assertNotEqual(len(_lowerCamelCase ) , 0 ) a :str = tokenizer.decode(_lowerCamelCase ) self.assertIsInstance(_lowerCamelCase , _lowerCamelCase ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , _lowerCamelCase ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def SCREAMING_SNAKE_CASE__ ( self ): pass
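# ---------------------------------------------------------------------------
# Hedged usage sketch of the tokenizer round trip the tests above assert,
# assuming the public "alibaba-damo/mgp-str-base" checkpoint is reachable.
from transformers import MgpstrTokenizer

tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
ids = tokenizer.encode("tester", add_special_tokens=False)
print(tokenizer.convert_ids_to_tokens(ids), tokenizer.decode(ids))
# ---------------------------------------------------------------------------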
"""simple docstring""" __lowerCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ): A__ = F'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(snake_case__ ) A__ = ''.join(bin(snake_case__ )[2:].zfill(8 ) for byte in data ) A__ = len(snake_case__ ) % 6 != 0 if padding_needed: # The padding that will be added later A__ = b'=' * ((6 - len(snake_case__ ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(snake_case__ ) % 6) else: A__ = b'' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(snake_case__ ) , 6 ) ).encode() + padding ) def UpperCAmelCase ( UpperCamelCase__ ): """simple docstring""" if not isinstance(snake_case__ , snake_case__ ) and not isinstance(snake_case__ , snake_case__ ): A__ = ( 'argument should be a bytes-like object or ASCII string, ' F'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(snake_case__ ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(snake_case__ , snake_case__ ): try: A__ = encoded_data.decode('utf-8' ) except UnicodeDecodeError: raise ValueError('base64 encoded data should only contain ASCII characters' ) A__ = encoded_data.count('=' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(snake_case__ ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one A__ = encoded_data[:-padding] A__ = ''.join( bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: A__ = ''.join( bin(B64_CHARSET.index(snake_case__ ) )[2:].zfill(6 ) for char in encoded_data ) A__ = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(snake_case__ ) , 8 ) ] return bytes(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def _snake_case ( snake_case__ : Dict ): A = [ 'encoder.version', 'decoder.version', 'model.encoder.version', 'model.decoder.version', '_float_tensor', 'decoder.output_projection.weight', ] for k in ignore_keys: state_dict.pop(snake_case__ , snake_case__ ) def _snake_case ( snake_case__ : int ): A , A = emb.weight.shape A = nn.Linear(snake_case__ , snake_case__ , bias=snake_case__ ) A = emb.weight.data return lin_layer def _snake_case ( snake_case__ : List[str] , snake_case__ : Any="facebook/mbart-large-en-ro" , snake_case__ : Optional[int]=False , snake_case__ : List[str]=False ): A = torch.load(snake_case__ , map_location='cpu' )['model'] remove_ignore_keys_(snake_case__ ) A = state_dict['encoder.embed_tokens.weight'].shape[0] A = MBartConfig.from_pretrained(snake_case__ , vocab_size=snake_case__ ) if mbart_aa and finetuned: A = 'relu' A = state_dict['decoder.embed_tokens.weight'] A = MBartForConditionalGeneration(snake_case__ ) model.model.load_state_dict(snake_case__ ) if finetuned: A = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') _lowercase = parser.parse_args() _lowercase = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
"""Feature extractor class for PoolFormer."""

import warnings

from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor


logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer __snake_case = logging.get_logger(__name__) __snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __snake_case = { '''vocab_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt''' ), '''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''', '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''', '''distilbert-base-uncased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''', '''distilbert-base-cased-distilled-squad''': ( '''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json''' ), '''distilbert-base-german-cased''': ( '''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json''' ), '''distilbert-base-multilingual-cased''': ( '''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json''' ), }, } __snake_case = { '''distilbert-base-uncased''': 512, '''distilbert-base-uncased-distilled-squad''': 512, '''distilbert-base-cased''': 512, '''distilbert-base-cased-distilled-squad''': 512, '''distilbert-base-german-cased''': 512, '''distilbert-base-multilingual-cased''': 512, } __snake_case = { '''distilbert-base-uncased''': {'''do_lower_case''': True}, '''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True}, '''distilbert-base-cased''': {'''do_lower_case''': False}, '''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False}, '''distilbert-base-german-cased''': {'''do_lower_case''': False}, '''distilbert-base-multilingual-cased''': {'''do_lower_case''': False}, } class lowercase ( A__ ): """simple docstring""" _a = VOCAB_FILES_NAMES _a = PRETRAINED_VOCAB_FILES_MAP _a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _a = PRETRAINED_INIT_CONFIGURATION _a = ['input_ids', 'attention_mask'] _a = DistilBertTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ): '''simple docstring''' super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , 
strip_accents=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase__ :int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars ): UpperCamelCase__ :int = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) ) UpperCamelCase__ :Optional[Any] = do_lower_case UpperCamelCase__ :Optional[Any] = strip_accents UpperCamelCase__ :List[Any] = tokenize_chinese_chars UpperCamelCase__ :Any = normalizer_class(**UpperCamelCase_ ) UpperCamelCase__ :int = do_lower_case def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=None ): '''simple docstring''' UpperCamelCase__ :Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :List[str] = [self.sep_token_id] UpperCamelCase__ :List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None ): '''simple docstring''' UpperCamelCase__ :str = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ )
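# ---------------------------------------------------------------------------
# Hedged usage sketch of the fast tokenizer above, using the public class
# name this file mirrors and assuming the distilbert-base-uncased checkpoint
# is reachable.
from transformers import DistilBertTokenizerFast

tok = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
ids = tok.encode("hello world")        # adds [CLS] ... [SEP] around the tokens
print(tok.convert_ids_to_tokens(ids))  # ['[CLS]', 'hello', 'world', '[SEP]']
# ---------------------------------------------------------------------------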
"""simple docstring""" from __future__ import annotations import math def a__ ( snake_case__ , snake_case__ ) -> list: if len(snake_case__ ) != 2 or len(a[0] ) != 2 or len(snake_case__ ) != 2 or len(b[0] ) != 2: raise Exception("""Matrices are not 2x2""" ) lowerCamelCase = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def a__ ( snake_case__ , snake_case__ ) -> Union[str, Any]: return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(snake_case__ ) ) ] def a__ ( snake_case__ , snake_case__ ) -> List[str]: return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(snake_case__ ) ) ] def a__ ( snake_case__ ) -> tuple[list, list, list, list]: if len(snake_case__ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception("""Odd matrices are not supported!""" ) lowerCamelCase = len(snake_case__ ) lowerCamelCase = matrix_length // 2 lowerCamelCase = [[a[i][j] for j in range(snake_case__ , snake_case__ )] for i in range(snake_case__ )] lowerCamelCase = [ [a[i][j] for j in range(snake_case__ , snake_case__ )] for i in range(snake_case__ , snake_case__ ) ] lowerCamelCase = [[a[i][j] for j in range(snake_case__ )] for i in range(snake_case__ )] lowerCamelCase = [[a[i][j] for j in range(snake_case__ )] for i in range(snake_case__ , snake_case__ )] return top_left, top_right, bot_left, bot_right def a__ ( snake_case__ ) -> tuple[int, int]: return len(snake_case__ ), len(matrix[0] ) def a__ ( snake_case__ ) -> None: print("""\n""".join(str(snake_case__ ) for line in matrix ) ) def a__ ( snake_case__ , snake_case__ ) -> list: if matrix_dimensions(snake_case__ ) == (2, 2): return default_matrix_multiplication(snake_case__ , snake_case__ ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = split_matrix(snake_case__ ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = split_matrix(snake_case__ ) lowerCamelCase = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) ) lowerCamelCase = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) lowerCamelCase = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) lowerCamelCase = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) ) lowerCamelCase = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) lowerCamelCase = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) lowerCamelCase = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) lowerCamelCase = matrix_addition(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ ) lowerCamelCase = matrix_addition(snake_case__ , snake_case__ ) lowerCamelCase = matrix_addition(snake_case__ , snake_case__ ) lowerCamelCase = matrix_subtraction(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ ) # construct the new matrix from our 4 quadrants lowerCamelCase = [] for i in range(len(snake_case__ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(snake_case__ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def a__ ( snake_case__ , snake_case__ ) -> 
list: if matrix_dimensions(snake_case__ )[1] != matrix_dimensions(snake_case__ )[0]: lowerCamelCase = ( """Unable to multiply these matrices, please check the dimensions.\n""" F'Matrix A: {matrixa}\n' F'Matrix B: {matrixa}' ) raise Exception(snake_case__ ) lowerCamelCase = matrix_dimensions(snake_case__ ) lowerCamelCase = matrix_dimensions(snake_case__ ) if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] lowerCamelCase = max(*snake_case__ , *snake_case__ ) lowerCamelCase = int(math.pow(2 , math.ceil(math.loga(snake_case__ ) ) ) ) lowerCamelCase = matrixa lowerCamelCase = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , snake_case__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) lowerCamelCase = actual_strassen(snake_case__ , snake_case__ ) # Removing the additional zeros for i in range(0 , snake_case__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": lowerCAmelCase : Optional[int] = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] lowerCAmelCase : Optional[Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
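# ---------------------------------------------------------------------------
# Hedged usage sketch; the __main__ block above implies the public entry
# point is bound to the name `strassen`. Note that the padding step mutates
# its inputs in place, so the matrices below are treated as throwaway.
a = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]  # 3x4
b = [[1], [2], [3], [4]]                        # 4x1
print(strassen(a, b))  # expected [[1], [2], [3]] after padding to 4x4 and trimming
# ---------------------------------------------------------------------------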
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def a__ ( ) -> Union[str, Any]: lowerCamelCase = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=snake_case__ , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=snake_case__ , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=snake_case__ ) return parser.parse_args() def a__ ( ) -> List[str]: lowerCamelCase = parse_args() # Import training_script as a module. lowerCamelCase = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) lowerCamelCase = script_fpath.stem lowerCamelCase = importlib.import_module(snake_case__ ) # Patch sys.argv lowerCamelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Dict = '''Speech2TextFeatureExtractor''' __UpperCAmelCase : Optional[Any] = '''Speech2TextTokenizer''' def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: '''simple docstring''' super().__init__(UpperCamelCase__ , UpperCamelCase__ ) snake_case : List[str] = self.feature_extractor snake_case : str = False def __call__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) snake_case : List[str] = kwargs.pop("raw_speech" ) else: snake_case : Any = kwargs.pop("audio" , UpperCamelCase__ ) snake_case : str = kwargs.pop("sampling_rate" , UpperCamelCase__ ) snake_case : Union[str, Any] = kwargs.pop("text" , UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: snake_case : str = args[0] snake_case : List[str] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: snake_case : str = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ ) if text is not None: snake_case : Tuple = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ ) if text is None: return inputs elif audio is None: return encodings else: snake_case : List[Any] = encodings["input_ids"] return inputs def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple: '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any: '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) @contextmanager def lowerCamelCase ( self ) -> List[str]: '''simple docstring''' warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) snake_case : Optional[int] = True snake_case : List[Any] = self.tokenizer yield snake_case : int = self.feature_extractor snake_case : Dict = False
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __snake_case = logging.get_logger(__name__) __snake_case = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Tuple = '''altclip_text_model''' def __init__( self , UpperCamelCase__=25_0002 , UpperCamelCase__=1024 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=4096 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=514 , UpperCamelCase__=1 , UpperCamelCase__=0.02 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-05 , UpperCamelCase__=1 , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__="absolute" , UpperCamelCase__=True , UpperCamelCase__=768 , **UpperCamelCase__ , ) -> Optional[int]: '''simple docstring''' super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) snake_case : Any = vocab_size snake_case : List[Any] = hidden_size snake_case : Optional[int] = num_hidden_layers snake_case : Optional[Any] = num_attention_heads snake_case : Dict = hidden_act snake_case : Dict = intermediate_size snake_case : int = hidden_dropout_prob snake_case : Optional[int] = attention_probs_dropout_prob snake_case : Union[str, Any] = max_position_embeddings snake_case : Optional[int] = type_vocab_size snake_case : Dict = initializer_range snake_case : int = initializer_factor snake_case : Union[str, Any] = layer_norm_eps snake_case : List[Any] = position_embedding_type snake_case : Any = use_cache snake_case : str = project_dim class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : Tuple = '''altclip_vision_model''' def __init__( self , UpperCamelCase__=768 , UpperCamelCase__=3072 , UpperCamelCase__=512 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3 , UpperCamelCase__=224 , UpperCamelCase__=32 , UpperCamelCase__="quick_gelu" , UpperCamelCase__=1e-5 , UpperCamelCase__=0.0 , UpperCamelCase__=0.02 , UpperCamelCase__=1.0 , **UpperCamelCase__ , ) -> str: '''simple docstring''' super().__init__(**UpperCamelCase__ ) snake_case : Optional[int] = hidden_size snake_case : str = intermediate_size snake_case : List[str] = projection_dim snake_case : Optional[Any] = num_hidden_layers snake_case : Optional[int] = num_attention_heads snake_case : str = num_channels snake_case : List[str] = patch_size snake_case : List[Any] = image_size snake_case : Union[str, Any] = initializer_range snake_case : Optional[Any] = initializer_factor snake_case : Any = attention_dropout snake_case : Dict = layer_norm_eps snake_case : List[str] = hidden_act @classmethod def lowerCamelCase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase__ ) snake_case ,snake_case : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("model_type" ) == "altclip": snake_case : Optional[Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class _lowerCAmelCase ( snake_case_ ): __UpperCAmelCase : str = '''altclip''' __UpperCAmelCase : Optional[Any] = True def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=768 , UpperCamelCase__=2.6592 , **UpperCamelCase__ ) -> Any: '''simple docstring''' snake_case : List[str] = kwargs.pop("text_config_dict" , UpperCamelCase__ ) snake_case : Union[str, Any] = kwargs.pop("vision_config_dict" , UpperCamelCase__ ) super().__init__(**UpperCamelCase__ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: snake_case : List[str] = {} # This is the complete result when using `text_config_dict`. snake_case : Dict = AltCLIPTextConfig(**UpperCamelCase__ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: snake_case : Optional[Any] = ( F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. ' F'The value `text_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: snake_case : Any = ( F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The ' F'value `text_config["{key}"]` will be overriden.' ) logger.warning(UpperCamelCase__ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: snake_case : Union[str, Any] = {} # This is the complete result when using `vision_config_dict`. snake_case : int = AltCLIPVisionConfig(**UpperCamelCase__ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: snake_case : Optional[int] = { str(UpperCamelCase__ ): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: snake_case : int = ( F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different ' F'values. The value `vision_config_dict["{key}"]` will be used instead.' ) # If inferred from default argument values (just to be super careful) else: snake_case : Optional[Any] = ( F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ' F'The value `vision_config["{key}"]` will be overriden.' ) logger.warning(UpperCamelCase__ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: snake_case : Optional[int] = {} logger.info("`text_config` is `None`. 
Initializing the `AltCLIPTextConfig` with default values." ) if vision_config is None: snake_case : Dict = {} logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." ) snake_case : Dict = AltCLIPTextConfig(**UpperCamelCase__ ) snake_case : Tuple = AltCLIPVisionConfig(**UpperCamelCase__ ) snake_case : int = projection_dim snake_case : List[str] = logit_scale_init_value snake_case : int = 1.0 @classmethod def lowerCamelCase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ ) def lowerCamelCase ( self ) -> Optional[Any]: '''simple docstring''' snake_case : Tuple = copy.deepcopy(self.__dict__ ) snake_case : Optional[int] = self.text_config.to_dict() snake_case : str = self.vision_config.to_dict() snake_case : Optional[int] = self.__class__.model_type return output
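# ---------------------------------------------------------------------------
# Hedged sketch of composing the configs defined above, using the public
# transformers class names that this file mirrors.
from transformers import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig

config = AltCLIPConfig.from_text_vision_configs(AltCLIPTextConfig(), AltCLIPVisionConfig())
print(config.projection_dim)  # 768 by default
# ---------------------------------------------------------------------------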
import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger __lowercase = get_logger(__name__) class lowerCamelCase_ ( enum.Enum ): '''simple docstring''' a__ : Union[str, Any] = """all_checks""" a__ : List[Any] = """basic_checks""" a__ : List[Any] = """no_checks""" class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ): '''simple docstring''' if expected_checksums is None: logger.info('''Unable to verify checksums.''' ) return if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise UnexpectedDownloadedFile(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) __UpperCamelCase :int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] __UpperCamelCase :Any = ''' for ''' + verification_name if verification_name is not None else '''''' if len(SCREAMING_SNAKE_CASE ) > 0: raise NonMatchingChecksumError( f"""Checksums didn't match{for_verification_name}:\n""" f"""{bad_urls}\n""" '''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' ) logger.info('''All the checksums matched successfully''' + for_verification_name ) class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' if expected_splits is None: logger.info('''Unable to verify splits sizes.''' ) return if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise ExpectedMoreSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) if len(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) > 0: raise UnexpectedSplits(str(set(SCREAMING_SNAKE_CASE ) - set(SCREAMING_SNAKE_CASE ) ) ) __UpperCamelCase :Union[str, Any] = [ {'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(SCREAMING_SNAKE_CASE ) > 0: raise NonMatchingSplitsSizesError(str(SCREAMING_SNAKE_CASE ) ) logger.info('''All the splits matched successfully.''' ) def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True ): '''simple docstring''' if record_checksum: __UpperCamelCase :List[Any] = shaaaa() with open(SCREAMING_SNAKE_CASE , '''rb''' ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , B'''''' ): m.update(SCREAMING_SNAKE_CASE ) __UpperCamelCase :List[Any] = m.hexdigest() else: __UpperCamelCase :Optional[int] = None return {"num_bytes": os.path.getsize(SCREAMING_SNAKE_CASE ), "checksum": checksum} def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
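# ---------------------------------------------------------------------------
# Hedged sketch of the final size/checksum helper above; binding it to the
# name `get_size_checksum_dict` (its name in the datasets library) is an
# assumption, since the identifiers in this file are mangled.
import tempfile

with tempfile.NamedTemporaryFile(suffix=".bin", delete=False) as f:
    f.write(b"hello")
print(get_size_checksum_dict(f.name))  # {'num_bytes': 5, 'checksum': '<sha256 hex>'}
# ---------------------------------------------------------------------------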
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = FileLock(str(tmpdir / "foo.lock" ) ) UpperCAmelCase_ : Tuple = FileLock(str(tmpdir / "foo.lock" ) ) UpperCAmelCase_ : Any = 0.01 with locka.acquire(): with pytest.raises(_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : Dict = time.time() locka.acquire(_SCREAMING_SNAKE_CASE ) assert time.time() - _start > timeout def __a ( __lowerCamelCase ): UpperCAmelCase_ : Dict = "a" * 1000 + ".lock" UpperCAmelCase_ : Optional[int] = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(_SCREAMING_SNAKE_CASE ) assert len(os.path.basename(locka._lock_file ) ) <= 255 UpperCAmelCase_ : Any = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(_SCREAMING_SNAKE_CASE ): locka.acquire(0 )
"""simple docstring""" import string # frequency taken from https://en.wikipedia.org/wiki/Letter_frequency _a = { 'E': 12.70, 'T': 9.06, 'A': 8.17, 'O': 7.51, 'I': 6.97, 'N': 6.75, 'S': 6.33, 'H': 6.09, 'R': 5.99, 'D': 4.25, 'L': 4.03, 'C': 2.78, 'U': 2.76, 'M': 2.41, 'W': 2.36, 'F': 2.23, 'G': 2.02, 'Y': 1.97, 'P': 1.93, 'B': 1.29, 'V': 0.98, 'K': 0.77, 'J': 0.15, 'X': 0.15, 'Q': 0.10, 'Z': 0.07, } _a = 'ETAOINSHRDLCUMWFGYPBVKJXQZ' _a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = {letter: 0 for letter in string.ascii_uppercase} for letter in message.upper(): if letter in LETTERS: letter_count[letter] += 1 return letter_count def __a ( __lowerCamelCase ): return x[0] def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_letter_count(__lowerCamelCase ) UpperCAmelCase_ : dict[int, list[str]] = { freq: [] for letter, freq in letter_to_freq.items() } for letter in LETTERS: freq_to_letter[letter_to_freq[letter]].append(__lowerCamelCase ) UpperCAmelCase_ : dict[int, str] = {} for freq in freq_to_letter: freq_to_letter[freq].sort(key=ETAOIN.find, reverse=__lowerCamelCase ) UpperCAmelCase_ : Any = "".join(freq_to_letter[freq] ) UpperCAmelCase_ : str = list(freq_to_letter_str.items() ) freq_pairs.sort(key=__lowerCamelCase, reverse=__lowerCamelCase ) UpperCAmelCase_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs] return "".join(__lowerCamelCase ) def __a ( __lowerCamelCase ): UpperCAmelCase_ : Any = get_frequency_order(__lowerCamelCase ) UpperCAmelCase_ : int = 0 for common_letter in ETAOIN[:6]: if common_letter in freq_order[:6]: match_score += 1 for uncommon_letter in ETAOIN[-6:]: if uncommon_letter in freq_order[-6:]: match_score += 1 return match_score if __name__ == "__main__": import doctest doctest.testmod()
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return the mode(s) of input_list (its most frequent values), sorted."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
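# ---------------------------------------------------------------------------
# Example usage of the mode finder above:
print(mode([2, 3, 3, 4]))   # [3]
print(mode([1, 1, 2, 2]))   # [1, 2]
print(mode([]))             # []
# ---------------------------------------------------------------------------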
import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) lowercase_ = logging.getLogger(__name__) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser( description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' ) parser.add_argument('''--file_path''' , type=snake_case , default='''data/dump.txt''' , help='''The path to the data.''' ) parser.add_argument('''--tokenizer_type''' , type=snake_case , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] ) parser.add_argument('''--tokenizer_name''' , type=snake_case , default='''bert-base-uncased''' , help='''The tokenizer to use.''' ) parser.add_argument('''--dump_file''' , type=snake_case , default='''data/dump''' , help='''The dump file prefix.''' ) __SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() logger.info(F'''Loading Tokenizer ({args.tokenizer_name})''' ) if args.tokenizer_type == "bert": __SCREAMING_SNAKE_CASE : Union[str, Any] = BertTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]` __SCREAMING_SNAKE_CASE : List[str] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]` elif args.tokenizer_type == "roberta": __SCREAMING_SNAKE_CASE : List[str] = RobertaTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.special_tokens_map['''cls_token'''] # `<s>` __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.special_tokens_map['''sep_token'''] # `</s>` elif args.tokenizer_type == "gpt2": __SCREAMING_SNAKE_CASE : Dict = GPTaTokenizer.from_pretrained(args.tokenizer_name ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>` __SCREAMING_SNAKE_CASE : str = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>` logger.info(F'''Loading text from {args.file_path}''' ) with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp: __SCREAMING_SNAKE_CASE : str = fp.readlines() logger.info('''Start encoding''' ) logger.info(F'''{len(snake_case )} examples to process.''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : Dict = 0 __SCREAMING_SNAKE_CASE : List[str] = 10_000 __SCREAMING_SNAKE_CASE : Dict = time.time() for text in data: __SCREAMING_SNAKE_CASE : Optional[int] = F'''{bos} {text.strip()} {sep}''' __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode(snake_case , add_special_tokens=snake_case ) rslt.append(snake_case ) iter += 1 if iter % interval == 0: __SCREAMING_SNAKE_CASE : List[str] = time.time() logger.info(F'''{iter} examples processed. 
- {(end-start):.2f}s/{interval}expl''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = time.time() logger.info('''Finished binarization''' ) logger.info(F'''{len(snake_case )} examples processed.''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = F'''{args.dump_file}.{args.tokenizer_name}.pickle''' __SCREAMING_SNAKE_CASE : str = tokenizer.vocab_size if vocab_size < (1 << 16): __SCREAMING_SNAKE_CASE : List[str] = [np.uintaa(snake_case ) for d in rslt] else: __SCREAMING_SNAKE_CASE : Optional[int] = [np.intaa(snake_case ) for d in rslt] random.shuffle(rslt_ ) logger.info(F'''Dump to {dp_file}''' ) with open(snake_case , '''wb''' ) as handle: pickle.dump(rslt_ , snake_case , protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
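# ---------------------------------------------------------------------------
# Hedged usage sketch (flags taken from the argparse definition above; the
# script filename is an assumption):
#
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized
# ---------------------------------------------------------------------------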
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class A_ ( __UpperCamelCase ): '''simple docstring''' def _snake_case ( self: Dict ): __lowerCamelCase : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(a , 'width_multiplier' ) ) class A_ : '''simple docstring''' def __init__( self: Dict , a: str , a: Tuple=13 , a: List[Any]=64 , a: List[str]=2 , a: Any=3 , a: Union[str, Any]="swish" , a: Any=3 , a: Union[str, Any]=32 , a: int=0.1 , a: Tuple=0.0_2 , a: Optional[Any]=True , a: Any=True , a: int=10 , a: int=None , a: Optional[Any]=0.2_5 , a: str=0.0 , a: Optional[Any]=0.0 , ): __lowerCamelCase : int = parent __lowerCamelCase : List[Any] = batch_size __lowerCamelCase : str = image_size __lowerCamelCase : List[Any] = patch_size __lowerCamelCase : int = num_channels __lowerCamelCase : Dict = make_divisible(512 * width_multiplier , divisor=8 ) __lowerCamelCase : Optional[int] = hidden_act __lowerCamelCase : int = conv_kernel_size __lowerCamelCase : List[str] = output_stride __lowerCamelCase : Tuple = classifier_dropout_prob __lowerCamelCase : Dict = use_labels __lowerCamelCase : Any = is_training __lowerCamelCase : List[Any] = num_labels __lowerCamelCase : Optional[int] = initializer_range __lowerCamelCase : str = scope __lowerCamelCase : List[Any] = width_multiplier __lowerCamelCase : str = ffn_dropout __lowerCamelCase : int = attn_dropout def _snake_case ( self: Dict ): __lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCamelCase : List[Any] = None __lowerCamelCase : List[Any] = None if self.use_labels: __lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels ) __lowerCamelCase : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __lowerCamelCase : Any = self.get_config() return config, pixel_values, labels, pixel_labels def _snake_case ( self: Dict ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def _snake_case ( self: List[str] , a: Dict , a: int , a: Any , a: str ): __lowerCamelCase : List[str] = MobileViTVaModel(config=a ) model.to(a ) model.eval() __lowerCamelCase : Union[str, Any] = model(a ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // 
self.output_stride, ) , ) def _snake_case ( self: str , a: List[Any] , a: Tuple , a: Union[str, Any] , a: List[str] ): __lowerCamelCase : Any = self.num_labels __lowerCamelCase : Tuple = MobileViTVaForImageClassification(a ) model.to(a ) model.eval() __lowerCamelCase : int = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self: Tuple , a: List[Any] , a: Tuple , a: str , a: str ): __lowerCamelCase : int = self.num_labels __lowerCamelCase : Tuple = MobileViTVaForSemanticSegmentation(a ) model.to(a ) model.eval() __lowerCamelCase : Tuple = model(a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __lowerCamelCase : List[str] = model(a , labels=a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _snake_case ( self: int ): __lowerCamelCase : List[Any] = self.prepare_config_and_inputs() __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = config_and_inputs __lowerCamelCase : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): '''simple docstring''' __snake_case = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) __snake_case = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) __snake_case = False __snake_case = False __snake_case = False __snake_case = False def _snake_case ( self: Any ): __lowerCamelCase : List[Any] = MobileViTVaModelTester(self ) __lowerCamelCase : Any = MobileViTVaConfigTester(self , config_class=a , has_text_modality=a ) def _snake_case ( self: List[Any] ): self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def _snake_case ( self: List[str] ): pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def _snake_case ( self: str ): pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def _snake_case ( self: List[Any] ): pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def _snake_case ( self: str ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _snake_case ( self: Optional[int] ): pass def _snake_case ( self: Tuple ): __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Union[str, Any] = model_class(a ) __lowerCamelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()] __lowerCamelCase : Any = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _snake_case ( self: List[Any] ): __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _snake_case ( self: Union[str, Any] ): def check_hidden_states_output(a: Optional[Any] , a: Optional[int] , a: Any ): __lowerCamelCase : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): __lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(a , a ) ) __lowerCamelCase : str = outputs.hidden_states __lowerCamelCase : List[Any] = 5 self.assertEqual(len(a ) , a ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __lowerCamelCase : int = 2 for i in range(len(a ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCamelCase : Union[str, Any] = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCamelCase : str = True check_hidden_states_output(a , a , a ) def _snake_case ( self: Dict ): __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) def _snake_case ( self: List[Any] ): __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a ) @slow def _snake_case ( self: str ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Optional[Any] = MobileViTVaModel.from_pretrained(a ) self.assertIsNotNone(a ) def UpperCamelCase__ ( ): __lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _snake_case ( self: Optional[Any] ): return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def _snake_case ( self: Union[str, Any] ): __lowerCamelCase : str = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( a ) __lowerCamelCase : List[str] = self.default_image_processor __lowerCamelCase : Dict = prepare_img() __lowerCamelCase : Union[str, Any] = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): __lowerCamelCase : Any = model(**a ) # verify the logits __lowerCamelCase : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , a ) __lowerCamelCase : List[str] = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(a 
) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow def _snake_case ( self: Optional[Any] ): __lowerCamelCase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) __lowerCamelCase : Optional[int] = model.to(a ) __lowerCamelCase : Tuple = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) __lowerCamelCase : Tuple = prepare_img() __lowerCamelCase : Any = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): __lowerCamelCase : Union[str, Any] = model(**a ) __lowerCamelCase : Union[str, Any] = outputs.logits # verify the logits __lowerCamelCase : Tuple = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , a ) __lowerCamelCase : Optional[Any] = torch.tensor( [ [[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]], [[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]], [[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]], ] , device=a , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a , atol=1e-4 ) ) @slow def _snake_case ( self: List[Any] ): __lowerCamelCase : List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) __lowerCamelCase : Optional[int] = model.to(a ) __lowerCamelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) __lowerCamelCase : Union[str, Any] = prepare_img() __lowerCamelCase : List[Any] = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): __lowerCamelCase : int = model(**a ) __lowerCamelCase : Union[str, Any] = outputs.logits.detach().cpu() __lowerCamelCase : Tuple = image_processor.post_process_semantic_segmentation(outputs=a , target_sizes=[(50, 60)] ) __lowerCamelCase : Optional[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , a ) __lowerCamelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=a ) __lowerCamelCase : Union[str, Any] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , a )
194
import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Load configuration defined in the metadata file with open(SCREAMING_SNAKE_CASE__ ) as metadata_file: __lowerCamelCase : List[str] = json.load(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : int = LukeConfig(use_entity_aware_attention=SCREAMING_SNAKE_CASE__ , **metadata['model_config'] ) # Load in the weights from the checkpoint_path __lowerCamelCase : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' ) # Load the entity vocab file __lowerCamelCase : List[Any] = load_entity_vocab(SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] ) # Add special tokens to the token vocabulary for downstream tasks __lowerCamelCase : str = AddedToken('<ent>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : Dict = AddedToken('<ent2>' , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(SCREAMING_SNAKE_CASE__ ) with open(os.path.join(SCREAMING_SNAKE_CASE__ , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f: json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __lowerCamelCase : str = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) # Initialize the embeddings of the special tokens __lowerCamelCase : Union[str, Any] = state_dict['embeddings.word_embeddings.weight'] __lowerCamelCase : Tuple = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 ) __lowerCamelCase : Any = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 ) __lowerCamelCase : List[str] = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __lowerCamelCase : Optional[int] = f'encoder.layer.{layer_index}.attention.self.' __lowerCamelCase : Dict = state_dict[prefix + matrix_name] __lowerCamelCase : List[Any] = state_dict[prefix + matrix_name] __lowerCamelCase : Union[str, Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __lowerCamelCase : Optional[int] = state_dict['entity_embeddings.entity_embeddings.weight'] __lowerCamelCase : Union[str, Any] = entity_emb[entity_vocab['[MASK]']] __lowerCamelCase : Optional[Any] = LukeModel(config=SCREAMING_SNAKE_CASE__ ).eval() __lowerCamelCase , __lowerCamelCase : List[Any] = model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ ) if not (len(SCREAMING_SNAKE_CASE__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f'Missing keys {", ".join(SCREAMING_SNAKE_CASE__ )}. 
Expected only missing embeddings.position_ids' ) if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )): raise ValueError( 'Unexpected keys' f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' ) # Check outputs __lowerCamelCase : Optional[Any] = LukeTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , task='entity_classification' ) __lowerCamelCase : Dict = ( 'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the' ' new world number one avoid a humiliating second- round exit at Wimbledon .' ) __lowerCamelCase : Union[str, Any] = (39, 42) __lowerCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE__ , entity_spans=[span] , add_prefix_space=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ) __lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE__ ) # Verify word hidden states if model_size == "large": __lowerCamelCase : Dict = torch.Size((1, 42, 1_024) ) __lowerCamelCase : int = torch.tensor( [[0.0_133, 0.0_865, 0.0_095], [0.3_093, -0.2_576, -0.7_418], [-0.1_720, -0.2_117, -0.2_869]] ) else: # base __lowerCamelCase : Union[str, Any] = torch.Size((1, 42, 768) ) __lowerCamelCase : Tuple = torch.tensor([[0.0_037, 0.1_368, -0.0_091], [0.1_099, 0.3_329, -0.1_095], [0.0_765, 0.5_335, 0.1_179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ): raise ValueError # Verify entity hidden states if model_size == "large": __lowerCamelCase : Union[str, Any] = torch.Size((1, 1, 1_024) ) __lowerCamelCase : Dict = torch.tensor([[0.0_466, -0.0_106, -0.0_179]] ) else: # base __lowerCamelCase : int = torch.Size((1, 1, 768) ) __lowerCamelCase : Dict = torch.tensor([[0.1_457, 0.1_044, 0.0_174]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' f' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE__ , atol=1e-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('Saving PyTorch model to {}'.format(SCREAMING_SNAKE_CASE__ ) ) model.save_pretrained(SCREAMING_SNAKE_CASE__ ) def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ): __lowerCamelCase : Tuple = {} with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' ) as f: for index, line in enumerate(SCREAMING_SNAKE_CASE__ ): __lowerCamelCase , __lowerCamelCase : List[Any] = line.rstrip().split('\t' ) __lowerCamelCase : Any = index return entity_vocab if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) lowercase_ = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
194
1
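The LUKE conversion above grows the word-embedding matrix by two rows so the newly added <ent> and <ent2> special tokens start from sensible values (the embeddings of '@' and '#'). A minimal sketch of that pattern with plain torch tensors; the sizes and token ids here are made up rather than read from a real checkpoint:

import torch

# Hypothetical sizes; a real checkpoint would define these.
vocab_size, hidden_size = 50_265, 768
word_emb = torch.randn(vocab_size, hidden_size)

# Seed the two new special-token rows from existing tokens,
# mirroring the '@' / '#' initialization in the script above.
at_id, hash_id = 1_030, 1_001  # placeholder ids for '@' and '#'
ent_emb = word_emb[at_id].unsqueeze(0)
ent2_emb = word_emb[hash_id].unsqueeze(0)

word_emb = torch.cat([word_emb, ent_emb, ent2_emb])
assert word_emb.shape == (vocab_size + 2, hidden_size)  # vocab grew by 2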
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) lowerCamelCase : List[Any] = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS} def _SCREAMING_SNAKE_CASE ( lowercase : List[str] , lowercase : Optional[int] , lowercase : List[str] , lowercase : Union[str, Any] ): '''simple docstring''' if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" ) if tokenizer_name is None: lowerCamelCase_ = TOKENIZER_CLASSES else: lowerCamelCase_ = {tokenizer_name: getattr(lowercase , tokenizer_name + 'Fast' )} logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" ) for tokenizer_name in tokenizer_names: lowerCamelCase_ = TOKENIZER_CLASSES[tokenizer_name] lowerCamelCase_ = True if checkpoint_name is None: lowerCamelCase_ = list(tokenizer_class.max_model_input_sizes.keys() ) else: lowerCamelCase_ = [checkpoint_name] logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" ) for checkpoint in checkpoint_names: logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" ) # Load tokenizer lowerCamelCase_ = tokenizer_class.from_pretrained(lowercase , force_download=lowercase ) # Save fast tokenizer logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" ) # For organization names we create sub-directories if "/" in checkpoint: lowerCamelCase_ , lowerCamelCase_ = checkpoint.split('/' ) lowerCamelCase_ = os.path.join(lowercase , lowercase ) elif add_prefix: lowerCamelCase_ = checkpoint lowerCamelCase_ = dump_path else: lowerCamelCase_ = None lowerCamelCase_ = dump_path logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]: lowerCamelCase_ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint] lowerCamelCase_ = file_path.split(lowercase )[-1][0] if next_char == "/": lowerCamelCase_ = os.path.join(lowercase , lowercase ) lowerCamelCase_ = None logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" ) lowerCamelCase_ = tokenizer.save_pretrained( lowercase , legacy_format=lowercase , filename_prefix=lowercase ) logger.info(f"""=> File names {file_names}""" ) for file_name in file_names: if not file_name.endswith('tokenizer.json' ): os.remove(lowercase ) logger.info(f"""=> removing {file_name}""" ) if __name__ == "__main__": lowerCamelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files." ) parser.add_argument( "--tokenizer_name", default=None, type=str, help=( F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """ "download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--checkpoint_name", default=None, type=str, help="Optional checkpoint name. 
If not given, will download and convert the canonical checkpoints from AWS.", ) parser.add_argument( "--force_download", action="store_true", help="Re-download checkpoints.", ) lowerCamelCase : Optional[Any] = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
204
def cocktail_shaker_sort( unsorted : list ): '''simple docstring''' for i in range(len(unsorted ) - 1 , 0 , -1 ): swapped = False for j in range(i , 0 , -1 ): if unsorted[j] < unsorted[j - 1]: unsorted[j] , unsorted[j - 1] = unsorted[j - 1], unsorted[j] swapped = True for j in range(i ): if unsorted[j] > unsorted[j + 1]: unsorted[j] , unsorted[j + 1] = unsorted[j + 1], unsorted[j] swapped = True if not swapped: break return unsorted if __name__ == "__main__": import doctest doctest.testmod() user_input = input("Enter numbers separated by a comma:\n").strip() unsorted = [int(item) for item in user_input.split(",")] print(F"""{cocktail_shaker_sort(unsorted) = }""")
204
1
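The tokenizer converter above reduces to one idea: instantiating the *Fast tokenizer class for a checkpoint triggers the slow-to-fast conversion, and saving it produces a tokenizer.json. A minimal sketch of that round trip for a single checkpoint, assuming network access to the Hub:

from transformers import BertTokenizerFast

# Loading through the fast class converts the slow vocabulary on the fly
# when no serialized fast tokenizer exists for the checkpoint.
tok = BertTokenizerFast.from_pretrained("bert-base-uncased")

# save_pretrained returns the paths it wrote; tokenizer.json is the
# serialized fast tokenizer the conversion script is after.
files = tok.save_pretrained("./bert-base-uncased-fast")
print([f for f in files if f.endswith("tokenizer.json")])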
from jiwer import compute_measures import datasets lowerCAmelCase_ = '''\ @inproceedings{inproceedings, author = {Morris, Andrew and Maier, Viktoria and Green, Phil}, year = {2004}, month = {01}, pages = {}, title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.} } ''' lowerCAmelCase_ = '''\ Word error rate (WER) is a common metric of the performance of an automatic speech recognition system. The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort. This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate. Word error rate can then be computed as: WER = (S + D + I) / N = (S + D + I) / (S + D + C) where S is the number of substitutions, D is the number of deletions, I is the number of insertions, C is the number of correct words, N is the number of words in the reference (N=S+D+C). This value indicates the average number of errors per reference word. The lower the value, the better the performance of the ASR system with a WER of 0 being a perfect score. ''' lowerCAmelCase_ = ''' Compute WER score of transcribed segments against references. Args: references: List of references for each speech input. predictions: List of transcriptions to score. concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively. 
Returns: (float): the word error rate Examples: >>> predictions = ["this is the prediction", "there is an other sample"] >>> references = ["this is the reference", "there is another one"] >>> wer = datasets.load_metric("wer") >>> wer_score = wer.compute(predictions=predictions, references=references) >>> print(wer_score) 0.5 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): def lowerCamelCase (self ) -> Tuple: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/Word_error_rate''', ] , ) def lowerCamelCase (self , __magic_name__=None , __magic_name__=None , __magic_name__=False ) -> Tuple: '''simple docstring''' if concatenate_texts: return compute_measures(__magic_name__ , __magic_name__ )["wer"] else: snake_case_ : int = 0 snake_case_ : str = 0 for prediction, reference in zip(__magic_name__ , __magic_name__ ): snake_case_ : List[str] = compute_measures(__magic_name__ , __magic_name__ ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
279
import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class __lowerCAmelCase ( unittest.TestCase ): def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=7 , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=True , __magic_name__=99 , __magic_name__=32 , __magic_name__=5 , __magic_name__=4 , __magic_name__=37 , __magic_name__="gelu" , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=512 , __magic_name__=16 , __magic_name__=2 , __magic_name__=0.02 , __magic_name__=4 , ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = parent snake_case_ : Optional[Any] = batch_size snake_case_ : List[Any] = seq_length snake_case_ : Tuple = is_training snake_case_ : List[str] = use_attention_mask snake_case_ : Any = use_token_type_ids snake_case_ : Dict = use_labels snake_case_ : Optional[Any] = vocab_size snake_case_ : Dict = hidden_size snake_case_ : List[Any] = num_hidden_layers snake_case_ : Union[str, Any] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Optional[int] = hidden_act snake_case_ : Optional[int] = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Optional[int] = max_position_embeddings snake_case_ : Optional[int] = type_vocab_size snake_case_ : List[Any] = type_sequence_label_size snake_case_ : Dict = initializer_range snake_case_ : Dict = num_choices def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : Any = None if self.use_attention_mask: snake_case_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : List[Any] = None if self.use_token_type_ids: snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ : List[Any] = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Tuple = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ : Optional[int] = config_and_inputs snake_case_ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_flax class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, 
FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : Optional[Any] = FlaxAlbertModelTester(self ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' for model_class_name in self.all_model_classes: snake_case_ : Dict = model_class_name.from_pretrained('''albert-base-v2''' ) snake_case_ : Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__magic_name__ ) @require_flax class __lowerCAmelCase ( unittest.TestCase ): @slow def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[Any] = FlaxAlbertModel.from_pretrained('''albert-base-v2''' ) snake_case_ : Optional[int] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) snake_case_ : Dict = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) snake_case_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ )[0] snake_case_ : Tuple = (1, 11, 768) self.assertEqual(output.shape , __magic_name__ ) snake_case_ : str = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __magic_name__ , atol=1e-4 ) )
279
1
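The WER metric above hands the alignment to jiwer, but the quantity itself is just word-level Levenshtein distance divided by the reference length: WER = (S + D + I) / N. A self-contained sketch with no dependencies:

def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # dp[i][j] = minimum edits turning the first i reference words
    # into the first j hypothesis words (classic Levenshtein DP).
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i  # delete all i reference words
    for j in range(len(hyp) + 1):
        dp[0][j] = j  # insert all j hypothesis words
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = dp[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            dp[i][j] = min(sub, dp[i - 1][j] + 1, dp[i][j - 1] + 1)
    return dp[-1][-1] / len(ref)

print(word_error_rate("this is the reference", "this is the prediction"))  # 0.25
# Pooling errors over both docstring pairs gives (1 + 3) / (4 + 4) = 0.5,
# matching the 0.5 in the usage example above.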
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL snake_case : Any = logging.get_logger(__name__) def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ): """simple docstring""" a :Any = b.T a :Tuple = np.sum(np.square(UpperCAmelCase_ ) , axis=1 ) a :Union[str, Any] = np.sum(np.square(UpperCAmelCase_ ) , axis=0 ) a :Tuple = np.matmul(UpperCAmelCase_ , UpperCAmelCase_ ) a :List[str] = aa[:, None] - 2 * ab + ba[None, :] return d def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ): """simple docstring""" a :Union[str, Any] = x.reshape(-1 , 3 ) a :List[Any] = squared_euclidean_distance(UpperCAmelCase_ , UpperCAmelCase_ ) return np.argmin(UpperCAmelCase_ , axis=1 ) class _snake_case ( _snake_case ): SCREAMING_SNAKE_CASE__ = ['pixel_values'] def __init__( self , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = True , _lowerCamelCase = True , **_lowerCamelCase , ): super().__init__(**_lowerCamelCase ) a :int = size if size is not None else {'''height''': 256, '''width''': 256} a :Any = get_size_dict(_lowerCamelCase ) a :List[Any] = np.array(_lowerCamelCase ) if clusters is not None else None a :Optional[Any] = do_resize a :Any = size a :Dict = resample a :Optional[int] = do_normalize a :Optional[int] = do_color_quantize def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = PILImageResampling.BILINEAR , _lowerCamelCase = None , **_lowerCamelCase , ): a :Optional[Any] = get_size_dict(_lowerCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dictionary must contain both height and width keys. Got {size.keys()}''' ) return resize( _lowerCamelCase , size=(size['''height'''], size['''width''']) , resample=_lowerCamelCase , data_format=_lowerCamelCase , **_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , ): a :int = rescale(image=_lowerCamelCase , scale=1 / 127.5 , data_format=_lowerCamelCase ) a :List[Any] = image - 1 return image def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = ChannelDimension.FIRST , **_lowerCamelCase , ): a :Optional[int] = do_resize if do_resize is not None else self.do_resize a :Optional[Any] = size if size is not None else self.size a :Optional[int] = get_size_dict(_lowerCamelCase ) a :Union[str, Any] = resample if resample is not None else self.resample a :int = do_normalize if do_normalize is not None else self.do_normalize a :Tuple = do_color_quantize if do_color_quantize is not None else self.do_color_quantize a :List[str] = clusters if clusters is not None else self.clusters a :List[str] = np.array(_lowerCamelCase ) a :List[Any] = make_list_of_images(_lowerCamelCase ) if not valid_images(_lowerCamelCase ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and (size is None or resample is None): raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_color_quantize and clusters is None: raise ValueError('''Clusters must be specified if do_color_quantize is True.''' ) # All transformations expect numpy arrays. a :Tuple = [to_numpy_array(_lowerCamelCase ) for image in images] if do_resize: a :List[str] = [self.resize(image=_lowerCamelCase , size=_lowerCamelCase , resample=_lowerCamelCase ) for image in images] if do_normalize: a :List[Any] = [self.normalize(image=_lowerCamelCase ) for image in images] if do_color_quantize: a :Union[str, Any] = [to_channel_dimension_format(_lowerCamelCase , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) a :Union[str, Any] = np.array(_lowerCamelCase ) a :List[Any] = color_quantize(_lowerCamelCase , _lowerCamelCase ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) a :List[str] = images.shape[0] a :Optional[Any] = images.reshape(_lowerCamelCase , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. a :Optional[int] = list(_lowerCamelCase ) else: a :Tuple = [to_channel_dimension_format(_lowerCamelCase , _lowerCamelCase ) for image in images] a :Any = {'''input_ids''': images} return BatchFeature(data=_lowerCamelCase , tensor_type=_lowerCamelCase )
94
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "openai/imagegpt-small": "", "openai/imagegpt-medium": "", "openai/imagegpt-large": "", } class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'imagegpt' _SCREAMING_SNAKE_CASE = ['past_key_values'] _SCREAMING_SNAKE_CASE = { 'hidden_size': 'n_embd', 'max_position_embeddings': 'n_positions', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self , lowercase=512 + 1 , lowercase=32 * 32 , lowercase=512 , lowercase=24 , lowercase=8 , lowercase=None , lowercase="quick_gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=1e-5 , lowercase=0.02 , lowercase=True , lowercase=True , lowercase=False , lowercase=False , lowercase=False , **lowercase , ) -> Any: lowerCAmelCase = vocab_size lowerCAmelCase = n_positions lowerCAmelCase = n_embd lowerCAmelCase = n_layer lowerCAmelCase = n_head lowerCAmelCase = n_inner lowerCAmelCase = activation_function lowerCAmelCase = resid_pdrop lowerCAmelCase = embd_pdrop lowerCAmelCase = attn_pdrop lowerCAmelCase = layer_norm_epsilon lowerCAmelCase = initializer_range lowerCAmelCase = scale_attn_weights lowerCAmelCase = use_cache lowerCAmelCase = scale_attn_by_inverse_layer_idx lowerCAmelCase = reorder_and_upcast_attn lowerCAmelCase = tie_word_embeddings super().__init__(tie_word_embeddings=lowercase , **lowercase ) class lowercase ( _UpperCAmelCase ): @property def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ] ) def _snake_case ( self , lowercase , lowercase = 1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 3 , lowercase = 32 , lowercase = 32 , ) -> Mapping[str, Any]: lowerCAmelCase = self._generate_dummy_images(lowercase , lowercase , lowercase , lowercase ) lowerCAmelCase = dict(preprocessor(images=lowercase , return_tensors=lowercase ) ) return inputs
46
0
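The color quantization in the processor above never materializes an (n, k, 3) difference tensor; it expands the squared Euclidean distance as ||a||^2 - 2*a.b + ||b||^2 and takes an argmin over clusters. The same trick in isolation, with random stand-ins for the image and ImageGPT's 512-color palette:

import numpy as np

def nearest_cluster(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
    # pixels: (n, 3) flattened RGB values; clusters: (k, 3) palette.
    d = (
        np.sum(pixels**2, axis=1)[:, None]      # ||a||^2, shape (n, 1)
        - 2 * pixels @ clusters.T               # -2 a.b, shape (n, k)
        + np.sum(clusters**2, axis=1)[None, :]  # ||b||^2, shape (1, k)
    )
    return np.argmin(d, axis=1)  # closest palette index per pixel

rng = np.random.default_rng(0)
pixels = rng.random((16, 3))   # stand-in for a flattened image
palette = rng.random((8, 3))   # stand-in for the real 512-entry palette
ids = nearest_cluster(pixels, palette)
assert ids.shape == (16,) and ids.max() < 8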
import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class lowercase__ ( unittest.TestCase): @slow def __A ( self : Optional[int] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(UpperCamelCase__ ): SCREAMING_SNAKE_CASE : Optional[int] = AutoConfig.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = FlaxAutoModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) @slow def __A ( self : str ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(UpperCamelCase__ ): SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = FlaxAutoModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) @slow def __A ( self : List[str] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[int] = FlaxBertModel.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : int = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase__ : Any ): return model(**UpperCamelCase__ ) eval(**UpperCamelCase__ ).block_until_ready() @slow def __A ( self : List[str] ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Tuple = FlaxRobertaModel.from_pretrained(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : str = tokenizer('''Do you support jax jitted function?''' , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase__ : Dict ): return model(**UpperCamelCase__ ) eval(**UpperCamelCase__ ).block_until_ready() def __A ( self : str ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , '''bert-base is not a local folder and is not a valid model identifier''' ): SCREAMING_SNAKE_CASE : Tuple = FlaxAutoModel.from_pretrained('''bert-base''' ) def __A ( self : List[Any] ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): SCREAMING_SNAKE_CASE : Tuple = FlaxAutoModel.from_pretrained(UpperCamelCase__ , revision='''aaaaaa''' ) def __A ( self : Optional[Any] ): '''simple docstring''' with self.assertRaisesRegex( UpperCamelCase__ , '''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''' , ): SCREAMING_SNAKE_CASE : List[Any] = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def __A ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex(UpperCamelCase__ , '''Use 
`from_pt=True` to load this model''' ): SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
258
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int __UpperCamelCase : Union[str, Any] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowercase__ ( datasets.BuilderConfig): UpperCamelCase_ = None def A ( _lowercase , _lowercase , ): import pyspark def generate_fn(): SCREAMING_SNAKE_CASE : str = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: SCREAMING_SNAKE_CASE : Any = df_with_partition_id.select('''*''' ).where(f"""part_id = {partition_id}""" ).drop('''part_id''' ) SCREAMING_SNAKE_CASE : Tuple = partition_df.collect() SCREAMING_SNAKE_CASE : str = 0 for row in rows: yield f"""{partition_id}_{row_id}""", row.asDict() row_id += 1 return generate_fn class lowercase__ ( _BaseExamplesIterable): def __init__( self : Optional[Any] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : Union[str, Any]=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = df SCREAMING_SNAKE_CASE : List[str] = partition_order or range(self.df.rdd.getNumPartitions() ) SCREAMING_SNAKE_CASE : Dict = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : List[Any] ): '''simple docstring''' yield from self.generate_examples_fn() def __A ( self : Tuple , UpperCamelCase__ : np.random.Generator ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCamelCase__ ) return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ ) def __A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = self.split_shard_indices_by_worker(UpperCamelCase__ , UpperCamelCase__ ) return SparkExamplesIterable(self.df , partition_order=UpperCamelCase__ ) @property def __A ( self : Tuple ): '''simple docstring''' return len(self.partition_order ) class lowercase__ ( datasets.DatasetBuilder): UpperCamelCase_ = SparkConfig def __init__( self : Union[str, Any] , UpperCamelCase__ : "pyspark.sql.DataFrame" , UpperCamelCase__ : str = None , UpperCamelCase__ : str = None , **UpperCamelCase__ : Union[str, Any] , ): '''simple docstring''' import pyspark SCREAMING_SNAKE_CASE : str = pyspark.sql.SparkSession.builder.getOrCreate() SCREAMING_SNAKE_CASE : List[Any] = df SCREAMING_SNAKE_CASE : Tuple = working_dir super().__init__( cache_dir=UpperCamelCase__ , config_name=str(self.df.semanticHash() ) , **UpperCamelCase__ , ) def __A ( self : Tuple ): '''simple docstring''' def create_cache_and_write_probe(UpperCamelCase__ : str ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Any = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. 
open(UpperCamelCase__ , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: SCREAMING_SNAKE_CASE : Dict = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCamelCase__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def __A ( self : Any ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __A ( self : str , UpperCamelCase__ : datasets.download.download_manager.DownloadManager ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def __A ( self : int , UpperCamelCase__ : List[Any] ): '''simple docstring''' import pyspark def get_arrow_batch_size(UpperCamelCase__ : Tuple ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) SCREAMING_SNAKE_CASE : int = self.df.count() SCREAMING_SNAKE_CASE : Tuple = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. SCREAMING_SNAKE_CASE : Optional[Any] = ( self.df.limit(UpperCamelCase__ ) .repartition(1 ) .mapInArrow(UpperCamelCase__ , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) SCREAMING_SNAKE_CASE : Optional[Any] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. SCREAMING_SNAKE_CASE : List[str] = min(UpperCamelCase__ , int(approx_total_size / max_shard_size ) ) SCREAMING_SNAKE_CASE : Optional[int] = self.df.repartition(UpperCamelCase__ ) def __A ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , ): '''simple docstring''' import pyspark SCREAMING_SNAKE_CASE : int = ParquetWriter if file_format == '''parquet''' else ArrowWriter SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(self._working_dir , os.path.basename(UpperCamelCase__ ) ) if self._working_dir else fpath SCREAMING_SNAKE_CASE : Optional[int] = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. SCREAMING_SNAKE_CASE : str = self.config.features SCREAMING_SNAKE_CASE : Optional[int] = self._writer_batch_size SCREAMING_SNAKE_CASE : Optional[int] = self._fs.storage_options def write_arrow(UpperCamelCase__ : Optional[Any] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. SCREAMING_SNAKE_CASE : int = pyspark.TaskContext().taskAttemptId() SCREAMING_SNAKE_CASE : str = next(UpperCamelCase__ , UpperCamelCase__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) SCREAMING_SNAKE_CASE : List[Any] = 0 SCREAMING_SNAKE_CASE : Optional[int] = writer_class( features=UpperCamelCase__ , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , ) SCREAMING_SNAKE_CASE : Tuple = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCamelCase__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 SCREAMING_SNAKE_CASE : Optional[int] = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , writer_batch_size=UpperCamelCase__ , storage_options=UpperCamelCase__ , embed_local_files=UpperCamelCase__ , ) SCREAMING_SNAKE_CASE : List[str] = pa.Table.from_batches([batch] ) writer.write_table(UpperCamelCase__ ) if writer._num_bytes > 0: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCamelCase__ ) ): SCREAMING_SNAKE_CASE : int = os.path.join(os.path.dirname(UpperCamelCase__ ) , os.path.basename(UpperCamelCase__ ) ) shutil.move(UpperCamelCase__ , UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = ( self.df.mapInArrow(UpperCamelCase__ , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def __A ( self : Dict , UpperCamelCase__ : "datasets.SplitGenerator" , UpperCamelCase__ : str = "arrow" , UpperCamelCase__ : Optional[Union[str, int]] = None , UpperCamelCase__ : Optional[int] = None , **UpperCamelCase__ : str , ): '''simple docstring''' self._validate_cache_dir() SCREAMING_SNAKE_CASE : List[str] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = not is_remote_filesystem(self._fs ) SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join if is_local else posixpath.join SCREAMING_SNAKE_CASE : List[Any] = '''-TTTTT-SSSSS-of-NNNNN''' SCREAMING_SNAKE_CASE : List[str] = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}""" SCREAMING_SNAKE_CASE : Dict = path_join(self._output_dir , UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = 0 SCREAMING_SNAKE_CASE : Dict = 0 SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : Dict = [] SCREAMING_SNAKE_CASE : Dict = [] for task_id, content in self._prepare_split_single(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): ( ( 
SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ) : int = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = total_num_examples SCREAMING_SNAKE_CASE : Dict = total_num_bytes # should rename everything at the end logger.debug(f"""Renaming {total_shards} shards.""" ) if total_shards > 1: SCREAMING_SNAKE_CASE : Tuple = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. SCREAMING_SNAKE_CASE : str = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , ): rename( UpperCamelCase__ , fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace('''TTTTT-SSSSS''' , f"""{global_shard_id:05d}""" ).replace('''NNNNN''' , f"""{total_shards:05d}""" ) , ) SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : Optional[Any] = 0 for i in range(len(UpperCamelCase__ ) ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = task_id_and_num_shards[i] for shard_id in range(UpperCamelCase__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCamelCase__ , len(UpperCamelCase__ ) ).map(lambda UpperCamelCase__ : _rename_shard(*UpperCamelCase__ ) ).collect() else: # don't use any pattern SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Union[str, Any] = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f"""{shard_id:05d}""" ).replace('''TTTTT''' , f"""{task_id:05d}""" ) , fpath.replace(UpperCamelCase__ , '''''' ) , ) def __A ( self : int , UpperCamelCase__ : "datasets.SplitGenerator" , ): '''simple docstring''' return SparkExamplesIterable(self.df )
258
1
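The Spark builder above decides how many output shards to write by sampling up to 100 rows, averaging their Arrow byte size, and repartitioning so each partition stays under max_shard_size (with at least one row per partition). The same arithmetic in isolation, with made-up numbers:

def partitions_for(num_rows: int, avg_row_bytes: float, max_shard_bytes: int) -> int:
    approx_total = avg_row_bytes * num_rows
    if approx_total <= max_shard_bytes:
        return 1  # everything fits in a single shard
    # One partition per max_shard_bytes of estimated data, capped at
    # num_rows so every partition holds at least one row.
    return min(num_rows, int(approx_total / max_shard_bytes))

# 1M rows at ~2 KB each against 500 MB shards -> 4 partitions.
print(partitions_for(1_000_000, 2_000, 500_000_000))  # 4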
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class SCREAMING_SNAKE_CASE__ : '''simple docstring''' @property def A ( self : List[str] ): '''simple docstring''' return self.get_dummy_input() @property def A ( self : Any ): '''simple docstring''' if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ): '''simple docstring''' _snake_case = 4 _snake_case = 32 _snake_case = (32, 32) _snake_case = torch.manual_seed(0 ) _snake_case = torch.device(lowercase ) _snake_case = (batch_size, num_channels) + sizes _snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase ) _snake_case = {'hidden_states': hidden_states} if include_temb: _snake_case = 128 _snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase ) if include_res_hidden_states_tuple: _snake_case = torch.manual_seed(1 ) _snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),) if include_encoder_hidden_states: _snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase ) if include_skip_sample: _snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase ) return dummy_input def A ( self : Any ): '''simple docstring''' _snake_case = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 128, } if self.block_type == "up": _snake_case = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) _snake_case = self.dummy_input return init_dict, inputs_dict def A ( self : Dict , lowercase : Optional[int] ): '''simple docstring''' _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowercase ) unet_block.to(lowercase ) unet_block.eval() with torch.no_grad(): _snake_case = unet_block(**lowercase ) if isinstance(lowercase , lowercase ): _snake_case = output[0] self.assertEqual(output.shape , self.output_shape ) _snake_case = output[0, -1, -3:, -3:] _snake_case = torch.tensor(lowercase ).to(lowercase ) assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def A ( self : Dict ): '''simple docstring''' _snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common() _snake_case = self.block_class(**lowercase ) model.to(lowercase ) model.train() _snake_case = model(**lowercase ) if isinstance(lowercase , lowercase ): _snake_case = output[0] _snake_case = torch.device(lowercase ) _snake_case = randn_tensor(output.shape , device=lowercase ) _snake_case = torch.nn.functional.mse_loss(lowercase , lowercase ) loss.backward()
282
import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def A ( self : Optional[int] ): '''simple docstring''' _snake_case = 'hf-internal-testing/tiny-random-t5' _snake_case = AutoTokenizer.from_pretrained(lowercase ) _snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase ) _snake_case = tokenizer('This is me' , return_tensors='pt' ) _snake_case = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) _snake_case = model.generate(**lowercase ) _snake_case = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase ) _snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase ) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) _snake_case = model_reloaded.generate(**lowercase ) self.assertTrue(torch.allclose(lowercase , lowercase ) ) def A ( self : List[Any] ): '''simple docstring''' _snake_case = 'hf-internal-testing/tiny-random-t5' _snake_case = AutoModelForSeqaSeqLM.from_pretrained(lowercase ) _snake_case = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(lowercase ): model.save_pretrained(lowercase ) _snake_case = model.reverse_bettertransformer() model.save_pretrained(lowercase )
282
1
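The BetterTransformer test above encodes the supported workflow: convert with to_bettertransformer() for fast inference, then call reverse_bettertransformer() before saving so the checkpoint stays in the vanilla format. A minimal usage sketch, assuming both transformers and optimum are installed:

import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

name = "hf-internal-testing/tiny-random-t5"  # tiny checkpoint used by the test
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSeq2SeqLM.from_pretrained(name)

inputs = tokenizer("This is me", return_tensors="pt")
model = model.to_bettertransformer()       # swap in BetterTransformer modules
with torch.no_grad():
    out = model.generate(**inputs)
model = model.reverse_bettertransformer()  # restore vanilla modules first
model.save_pretrained("./t5-tiny-roundtrip")
print(tokenizer.batch_decode(out, skip_special_tokens=True))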
'''simple docstring''' import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' lowerCamelCase_ :Union[str, Any] = 1 @register_to_config def __init__( self , snake_case_ = 1_0_0_0 , snake_case_ = None ): '''simple docstring''' self.set_timesteps(snake_case_ ) # standard deviation of the initial noise distribution UpperCAmelCase_ : Union[str, Any] = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. UpperCAmelCase_ : int = 4 # running values UpperCAmelCase_ : str = [] def _UpperCamelCase ( self , snake_case_ , snake_case_ = None ): '''simple docstring''' UpperCAmelCase_ : Union[str, Any] = num_inference_steps UpperCAmelCase_ : int = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1] UpperCAmelCase_ : Tuple = torch.cat([steps, torch.tensor([0.0] )] ) if self.config.trained_betas is not None: UpperCAmelCase_ : Optional[int] = torch.tensor(self.config.trained_betas , dtype=torch.floataa ) else: UpperCAmelCase_ : Tuple = torch.sin(steps * math.pi / 2 ) ** 2 UpperCAmelCase_ : Dict = (1.0 - self.betas**2) ** 0.5 UpperCAmelCase_ : str = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1] UpperCAmelCase_ : str = timesteps.to(snake_case_ ) UpperCAmelCase_ : Any = [] def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ): '''simple docstring''' if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' ) UpperCAmelCase_ : Any = (self.timesteps == timestep).nonzero().item() UpperCAmelCase_ : Optional[Any] = timestep_index + 1 UpperCAmelCase_ : Dict = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(snake_case_ ) if len(self.ets ) == 1: UpperCAmelCase_ : Tuple = self.ets[-1] elif len(self.ets ) == 2: UpperCAmelCase_ : Any = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets ) == 3: UpperCAmelCase_ : List[str] = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2 else: UpperCAmelCase_ : Union[str, Any] = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4]) UpperCAmelCase_ : Union[str, Any] = self._get_prev_sample(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=snake_case_ ) def _UpperCamelCase ( self , snake_case_ , *snake_case_ , **snake_case_ ): '''simple docstring''' return sample def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ): '''simple docstring''' UpperCAmelCase_ : int = self.alphas[timestep_index] UpperCAmelCase_ : Union[str, Any] = self.betas[timestep_index] UpperCAmelCase_ : Any = self.alphas[prev_timestep_index] UpperCAmelCase_ : Dict = self.betas[prev_timestep_index] UpperCAmelCase_ : List[Any] = (sample - sigma * ets) / max(snake_case_ , 1E-8 ) UpperCAmelCase_ : Tuple = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self ): '''simple docstring''' return self.config.num_train_timesteps
274
'''simple docstring''' import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : int = ['a', 'b', 'c'] # Defaults to last layer if both are None UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_aligned_output_features_output_indices(snake_case_ , snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , ['c'] ) self.assertEqual(snake_case_ , [2] ) # Out indices set to match out features UpperCAmelCase_ , UpperCAmelCase_ : int = get_aligned_output_features_output_indices(['a', 'c'] , snake_case_ , snake_case_ ) self.assertEqual(snake_case_ , ['a', 'c'] ) self.assertEqual(snake_case_ , [0, 2] ) # Out features set to match out indices UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_aligned_output_features_output_indices(snake_case_ , [0, 2] , snake_case_ ) self.assertEqual(snake_case_ , ['a', 'c'] ) self.assertEqual(snake_case_ , [0, 2] ) # Out features selected from negative indices UpperCAmelCase_ , UpperCAmelCase_ : int = get_aligned_output_features_output_indices(snake_case_ , [-3, -1] , snake_case_ ) self.assertEqual(snake_case_ , ['a', 'c'] ) self.assertEqual(snake_case_ , [-3, -1] ) def _UpperCamelCase ( self ): '''simple docstring''' with self.assertRaises(snake_case_ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , snake_case_ ) # Out features must be a list with self.assertRaises(snake_case_ ): verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] ) # Out features must be a subset of stage names with self.assertRaises(snake_case_ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] ) # Out indices must be a list or tuple with self.assertRaises(snake_case_ ): verify_out_features_out_indices(snake_case_ , 0 , ['a', 'b'] ) # Out indices must be a subset of stage names with self.assertRaises(snake_case_ ): verify_out_features_out_indices(snake_case_ , (0, 1) , ['a'] ) # Out features and out indices must be the same length with self.assertRaises(snake_case_ ): verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] ) # Out features should match out indices with self.assertRaises(snake_case_ ): verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] ) # Out features and out indices should be in order with self.assertRaises(snake_case_ ): verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] ) # Check passes with valid inputs verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] ) def _UpperCamelCase ( self ): '''simple docstring''' UpperCAmelCase_ : List[str] = BackboneMixin() UpperCAmelCase_ : Any = ['a', 'b', 'c'] UpperCAmelCase_ : str = ['a', 'c'] UpperCAmelCase_ : str = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly UpperCAmelCase_ : str = ['a', 'b'] self.assertEqual(backbone.out_features , ['a', 'b'] ) self.assertEqual(backbone.out_indices , [0, 1] ) UpperCAmelCase_ : Optional[int] = [-3, -1] self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [-3, -1] )
274
1
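The scheduler's step() above keeps a running list of model outputs (ets) and blends them with fixed coefficients: the latest value alone, then (3, -1)/2, (23, -16, 5)/12, and finally (55, -59, 37, -9)/24, which are the classic fourth-order Adams-Bashforth linear-multistep weights. A small sketch of just that blending logic on scalars:

def multistep_blend(ets: list) -> float:
    # ets holds past model outputs, most recent last, mirroring the
    # branching on len(self.ets) in the scheduler above.
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

# Each coefficient set sums to its denominator (e.g. 55 - 59 + 37 - 9 = 24),
# so a constant history is reproduced exactly.
assert multistep_blend([1.0, 1.0, 1.0, 1.0]) == 1.0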
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) a__ = '''bert-base-cased''' a__ = '''fp16''' a__ = '''bf16''' a__ = [FPaa, BFaa] @require_fsdp @require_cuda class snake_case ( _a ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any]) -> List[str]: """simple docstring""" super().setUp() _snake_case : Tuple = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def UpperCamelCase_ ( self : List[str]) -> List[Any]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(__lowerCamelCase): _snake_case : Union[str, Any] = self.dist_env.copy() _snake_case : List[Any] = F'''{i + 1}''' _snake_case : List[Any] = strategy with mockenv_context(**__lowerCamelCase): _snake_case : List[str] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1)) def UpperCamelCase_ ( self : str) -> int: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(__lowerCamelCase): _snake_case : str = self.dist_env.copy() _snake_case : List[Any] = prefetch_policy with mockenv_context(**__lowerCamelCase): _snake_case : Optional[int] = FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1)) def UpperCamelCase_ ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(__lowerCamelCase): _snake_case : Any = self.dist_env.copy() _snake_case : Tuple = state_dict_type with mockenv_context(**__lowerCamelCase): _snake_case : Dict = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1)) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only) def UpperCamelCase_ ( self : Tuple) -> str: """simple docstring""" _snake_case : int = AutoModel.from_pretrained(__lowerCamelCase) for policy in FSDP_AUTO_WRAP_POLICY: _snake_case : Any = self.dist_env.copy() _snake_case : Dict = policy if policy == "TRANSFORMER_BASED_WRAP": _snake_case : Any = """BertLayer""" elif policy == "SIZE_BASED_WRAP": _snake_case : Optional[Any] = """2000""" with mockenv_context(**__lowerCamelCase): _snake_case : int = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy) _snake_case : List[Any] = self.dist_env.copy() 
_snake_case : Optional[int] = """TRANSFORMER_BASED_WRAP""" _snake_case : int = """T5Layer""" with mockenv_context(**__lowerCamelCase): _snake_case : Any = FullyShardedDataParallelPlugin() with self.assertRaises(__lowerCamelCase) as cm: fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception)) _snake_case : List[str] = self.dist_env.copy() _snake_case : str = """SIZE_BASED_WRAP""" _snake_case : int = """0""" with mockenv_context(**__lowerCamelCase): _snake_case : Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(__lowerCamelCase) self.assertIsNone(fsdp_plugin.auto_wrap_policy) def UpperCamelCase_ ( self : str) -> Optional[int]: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: _snake_case : List[Any] = self.dist_env.copy() _snake_case : Union[str, Any] = mp_dtype with mockenv_context(**__lowerCamelCase): _snake_case : Dict = Accelerator() if mp_dtype == "fp16": _snake_case : int = torch.floataa elif mp_dtype == "bf16": _snake_case : Union[str, Any] = torch.bfloataa _snake_case : Dict = MixedPrecision(param_dtype=__lowerCamelCase , reduce_dtype=__lowerCamelCase , buffer_dtype=__lowerCamelCase) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __lowerCamelCase) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , __lowerCamelCase)) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler) AcceleratorState._reset_state(__lowerCamelCase) def UpperCamelCase_ ( self : Dict) -> int: """simple docstring""" from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: _snake_case : Union[str, Any] = self.dist_env.copy() _snake_case : Union[str, Any] = str(__lowerCamelCase).lower() with mockenv_context(**__lowerCamelCase): _snake_case : int = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__lowerCamelCase)) @require_fsdp @require_multi_gpu @slow class snake_case ( _a ): '''simple docstring''' def UpperCamelCase_ ( self : List[Any]) -> Optional[Any]: """simple docstring""" super().setUp() _snake_case : Optional[int] = 0.82 _snake_case : Any = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] _snake_case : List[Any] = { """multi_gpu_fp16""": 3200, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 2000, """fsdp_full_shard_transformer_based_wrap_fp16""": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. 
# "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } _snake_case : Optional[int] = 160 _snake_case : Union[str, Any] = 160 _snake_case : Tuple = inspect.getfile(accelerate.test_utils) _snake_case : str = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """external_deps"""]) def UpperCamelCase_ ( self : Optional[Any]) -> Optional[Any]: """simple docstring""" _snake_case : Optional[int] = os.path.join(self.test_scripts_folder , """test_performance.py""") _snake_case : Any = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: _snake_case : Optional[Any] = cmd.copy() for i, strategy in enumerate(__lowerCamelCase): if strategy.lower() in config: cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''') break if "fp32" in config: cmd_config.append("""--mixed_precision=no""") else: cmd_config.append("""--mixed_precision=fp16""") if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''') break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""") elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""") cmd_config.extend( [ self.test_file_path, F'''--output_dir={self.tmpdir}''', F'''--performance_lower_bound={self.performance_lower_bound}''', ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy()) def UpperCamelCase_ ( self : int) -> Tuple: """simple docstring""" _snake_case : Dict = os.path.join(self.test_scripts_folder , """test_checkpointing.py""") _snake_case : List[str] = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(__lowerCamelCase): _snake_case : List[str] = cmd.copy() cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''') if strategy != "FULL_SHARD": continue _snake_case : Union[str, Any] = len(__lowerCamelCase) for state_dict_type in FSDP_STATE_DICT_TYPE: _snake_case : Dict = cmd_config[:state_dict_config_index] cmd_config.append(F'''--fsdp_state_dict_type={state_dict_type}''') cmd_config.extend( [ self.test_file_path, F'''--output_dir={self.tmpdir}''', """--partial_train_epoch=1""", ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy()) _snake_case : Optional[Any] = cmd_config[:-1] _snake_case : int = os.path.join(self.tmpdir , """epoch_0""") cmd_config.extend( [ F'''--resume_from_checkpoint={resume_from_checkpoint}''', ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy()) def UpperCamelCase_ ( self : Optional[Any]) -> Optional[Any]: """simple docstring""" _snake_case : List[Any] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""") _snake_case : Union[str, Any] = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): _snake_case : List[str] = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""]) else: 
cmd_config.extend(["""--mixed_precision=no"""]) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""]) for i, strategy in enumerate(__lowerCamelCase): if strategy.lower() in spec: cmd_config.append(F'''--fsdp_sharding_strategy={i+1}''') break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(F'''--fsdp_auto_wrap_policy={policy}''') break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""") elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""") cmd_config.extend( [ self.test_file_path, F'''--output_dir={self.tmpdir}''', F'''--peak_memory_upper_bound={peak_mem_upper_bound}''', F'''--n_train={self.n_train}''', F'''--n_val={self.n_val}''', ]) with patch_environment(omp_num_threads=1): execute_subprocess_async(__lowerCamelCase , env=os.environ.copy())
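# Usage sketch mirroring the unit tests above: FullyShardedDataParallelPlugin is
# configured entirely from environment variables. The values below are example
# settings, and a torch build with FSDP support is assumed.
from transformers.testing_utils import mockenv_context

from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin

fsdp_env = dict(
    ACCELERATE_USE_FSDP="true",
    FSDP_SHARDING_STRATEGY="1",  # 1 == FULL_SHARD
    FSDP_BACKWARD_PREFETCH="BACKWARD_PRE",
    FSDP_STATE_DICT_TYPE="FULL_STATE_DICT",
)
with mockenv_context(**fsdp_env):
    fsdp_plugin = FullyShardedDataParallelPlugin()
    print(fsdp_plugin.sharding_strategy, fsdp_plugin.backward_prefetch, fsdp_plugin.state_dict_type)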
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """
    Checks whether it is possible to add next_ver into the path by validating
    that there is an edge from the last vertex in the path to next_ver and
    that next_ver has not been visited yet.
    """
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """
    Recursive helper that tries to extend the partial path one vertex at a time,
    backtracking when a candidate vertex leads to a dead end.
    """
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """
    Wrapper for the recursive Hamiltonian-cycle search. Returns the cycle as a
    list of vertices (first == last) or an empty list if no cycle exists.
    """
    # Initialize path with -1, indicating that vertices have not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
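# Usage sketch: an adjacency-matrix graph that contains the Hamiltonian cycle
# 0 -> 1 -> 2 -> 4 -> 3 -> 0 (example data).
example_graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
print(hamilton_cycle(example_graph))  # [0, 1, 2, 4, 3, 0]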
from importlib import import_module

from .logging import get_logger


logger = get_logger(__name__)


class _PatchedModuleObj:
    """Sets all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
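# Usage sketch for patch_submodule: `demo` is a throwaway stand-in for a real
# module that did `import os` (in the datasets library this class is importable
# as datasets.utils.patching.patch_submodule).
import os
import types

demo = types.ModuleType("demo")
demo.os = os

with patch_submodule(demo, "os.path.join", lambda *parts: "<patched>/" + "/".join(parts)):
    print(demo.os.path.join("a", "b"))  # <patched>/a/b
print(demo.os.path.join("a", "b"))  # a/b - original behaviour restored on exit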
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
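# Usage sketch matching the integration tests above. The Llama-2 checkpoints
# are gated on the Hub; any compatible causal-LM checkpoint can be substituted.
from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
inputs = tokenizer("Simply put, the theory of relativity states that ", return_tensors="pt").to(model.device)
generated = model.generate(**inputs, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(generated[0], skip_special_tokens=True))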
import json
import logging
import os
import socket

import git
import numpy as np
import torch


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node runs.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
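# Usage sketch for the CPU-only path (a minimal argparse-style namespace; the
# multi-GPU path additionally needs WORLD_SIZE/RANK/... in the environment):
from argparse import Namespace

params = Namespace(n_gpu=0, local_rank=-1, seed=56)
init_gpu_params(params)
print(params.is_master, params.multi_gpu)  # True False
set_seed(params)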
import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase__ :List[Any] = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ): lowercase_ : str =XLNetTokenizer lowercase_ : Dict =XLNetTokenizerFast lowercase_ : str =True lowercase_ : str =True def A__ ( self): super().setUp() # We have a SentencePiece fixture for testing lowercase = XLNetTokenizer(A__ ,keep_accents=A__) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname) def A__ ( self): lowercase = '''<s>''' lowercase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A__) ,A__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A__) ,A__) def A__ ( self): lowercase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] ,'''<unk>''') self.assertEqual(vocab_keys[1] ,'''<s>''') self.assertEqual(vocab_keys[-1] ,'''<eod>''') self.assertEqual(len(A__) ,1_0_0_6) def A__ ( self): self.assertEqual(self.get_tokenizer().vocab_size ,1_0_0_0) def A__ ( self): lowercase = XLNetTokenizer(A__ ,keep_accents=A__) lowercase = tokenizer.tokenize('''This is a test''') self.assertListEqual(A__ ,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(A__) ,[2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]) lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( A__ ,[ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] ,) lowercase = tokenizer.convert_tokens_to_ids(A__) self.assertListEqual(A__ ,[8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4]) lowercase = tokenizer.convert_ids_to_tokens(A__) self.assertListEqual( A__ ,[ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] ,) def A__ ( self): lowercase = XLNetTokenizer(A__ ,do_lower_case=A__) lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( A__ ,[ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] ,) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''') ,['''▁he''', '''ll''', '''o''']) def A__ ( self): lowercase = XLNetTokenizer(A__ ,do_lower_case=A__) lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( A__ ,[ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE 
+ '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] ,) @slow def A__ ( self): lowercase = XLNetTokenizer.from_pretrained('''xlnet-base-cased''') lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=A__) lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=A__) lowercase = tokenizer.build_inputs_with_special_tokens(A__) lowercase = tokenizer.build_inputs_with_special_tokens(A__ ,A__) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def A__ ( self): # fmt: off lowercase = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A__ ,model_name='''xlnet-base-cased''' ,revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' ,)
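# Usage sketch with the released vocabulary (requires sentencepiece and network
# access; the SentencePiece fixture used above only exists inside the test suite):
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
print(tokenizer.tokenize("I was born in 92000, and this is falsé."))
ids = tokenizer.encode("sequence builders", add_special_tokens=True)
print(ids[-2:])  # [4, 3] - XLNet appends <sep> and <cls> at the end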
from __future__ import annotations

from sys import maxsize
from typing import Generic, TypeVar

T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # heap helper: position of the parent of the current node
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: position of the left child of the current node
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: position of the right child of the current node
    return (2 * position) + 2


class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap with a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(curr_pos, parent_position)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph stored as an adjacency dictionary."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight


def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
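# Usage sketch: build a small weighted graph (example weights) and run Prim's
# algorithm; `dist` holds the weight of the edge connecting each node into the
# tree, `parent` the node it connects through.
g = GraphUndirectedWeighted[str]()
g.add_edge("a", "b", 3)
g.add_edge("b", "c", 10)
g.add_edge("c", "d", 5)
g.add_edge("a", "c", 15)
dist, parent = prims_algo(g)
print(dist, parent)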
from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class lowercase ( snake_case__): """simple docstring""" a__ : str = ["vqvae"] def __init__( self : List[Any] , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : Mel , __UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str: super().__init__() self.register_modules(unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , mel=__UpperCAmelCase , vqvae=__UpperCAmelCase ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return 50 if isinstance(self.scheduler , __UpperCAmelCase ) else 1_000 @torch.no_grad() def __call__( self : List[Any] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = None , __UpperCAmelCase : np.ndarray = None , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = None , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Generator = None , __UpperCAmelCase : float = 0 , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : torch.Tensor = None , __UpperCAmelCase : Union[str, Any]=True , ) -> Union[ Union[AudioPipelineOutput, ImagePipelineOutput], Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]], ]: UpperCAmelCase_= steps or self.get_default_steps() self.scheduler.set_timesteps(__UpperCAmelCase ) UpperCAmelCase_= step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: UpperCAmelCase_= (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: UpperCAmelCase_= randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) , generator=__UpperCAmelCase , device=self.device , ) UpperCAmelCase_= noise UpperCAmelCase_= None if audio_file is not None or raw_audio is not None: self.mel.load_audio(__UpperCAmelCase , __UpperCAmelCase ) UpperCAmelCase_= self.mel.audio_slice_to_image(__UpperCAmelCase ) UpperCAmelCase_= np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) UpperCAmelCase_= (input_image / 255) * 2 - 1 UpperCAmelCase_= torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device ) if self.vqvae is not None: UpperCAmelCase_= self.vqvae.encode(torch.unsqueeze(__UpperCAmelCase , 0 ) ).latent_dist.sample( generator=__UpperCAmelCase )[0] UpperCAmelCase_= self.vqvae.config.scaling_factor * input_images if start_step > 0: UpperCAmelCase_= self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , self.scheduler.timesteps[start_step - 1] ) UpperCAmelCase_= ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) UpperCAmelCase_= int(mask_start_secs * pixels_per_second ) UpperCAmelCase_= int(mask_end_secs * pixels_per_second ) UpperCAmelCase_= self.scheduler.add_noise(__UpperCAmelCase , __UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet , __UpperCAmelCase ): UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase 
)["""sample"""] else: UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase )["""sample"""] if isinstance(self.scheduler , __UpperCAmelCase ): UpperCAmelCase_= self.scheduler.step( model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , eta=__UpperCAmelCase , generator=__UpperCAmelCase , )["""prev_sample"""] else: UpperCAmelCase_= self.scheduler.step( model_output=__UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , generator=__UpperCAmelCase , )["""prev_sample"""] if mask is not None: if mask_start > 0: UpperCAmelCase_= mask[:, step, :, :mask_start] if mask_end > 0: UpperCAmelCase_= mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance UpperCAmelCase_= 1 / self.vqvae.config.scaling_factor * images UpperCAmelCase_= self.vqvae.decode(__UpperCAmelCase )["""sample"""] UpperCAmelCase_= (images / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_= images.cpu().permute(0 , 2 , 3 , 1 ).numpy() UpperCAmelCase_= (images * 255).round().astype("""uint8""" ) UpperCAmelCase_= list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(__UpperCAmelCase , mode="""RGB""" ).convert("""L""" ) for _ in images) ) UpperCAmelCase_= [self.mel.image_to_audio(__UpperCAmelCase ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(__UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(__UpperCAmelCase ) ) @torch.no_grad() def _SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : List[Image.Image] , __UpperCAmelCase : int = 50 ) -> np.ndarray: assert isinstance(self.scheduler , __UpperCAmelCase ) self.scheduler.set_timesteps(__UpperCAmelCase ) UpperCAmelCase_= np.array( [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) UpperCAmelCase_= (sample / 255) * 2 - 1 UpperCAmelCase_= torch.Tensor(__UpperCAmelCase ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ): UpperCAmelCase_= t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps UpperCAmelCase_= self.scheduler.alphas_cumprod[t] UpperCAmelCase_= ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) UpperCAmelCase_= 1 - alpha_prod_t UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase )["""sample"""] UpperCAmelCase_= (1 - alpha_prod_t_prev) ** 0.5 * model_output UpperCAmelCase_= (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) UpperCAmelCase_= sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _SCREAMING_SNAKE_CASE ( __UpperCAmelCase : torch.Tensor , __UpperCAmelCase : torch.Tensor , __UpperCAmelCase : float ) -> torch.Tensor: UpperCAmelCase_= acos(torch.dot(torch.flatten(__UpperCAmelCase ) , torch.flatten(__UpperCAmelCase ) ) / torch.norm(__UpperCAmelCase ) / torch.norm(__UpperCAmelCase ) ) return sin((1 - alpha) * theta ) * xa / sin(__UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(__UpperCAmelCase )
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
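# Usage sketch: the shim behaves exactly like MobileViTImageProcessor but emits
# a FutureWarning on construction (assumes the transformers package, where the
# class is re-exported, is installed with its vision extras).
import warnings

from transformers import MobileViTFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    feature_extractor = MobileViTFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)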
from ..utils import DummyObject, requires_backends


# Placeholder raised in place of a Flax class when the `flax` backend is
# missing. The real module defines one such class per public Flax symbol;
# since every class body here is identical, a single representative is kept.
# `FlaxPlaceholderObject` is a stand-in name, not the original identifier.
class FlaxPlaceholderObject(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # [-1, 1] NCHW tensors -> uint8-range HWC numpy arrays
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        # back to [-1, 1]
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
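A quick sketch of how the watermarker is typically driven; the random batch stands in for decoded diffusion outputs, and the 256-pixel size is chosen only to clear the minimum-width check above.

import torch

batch = torch.rand(1, 3, 256, 256) * 2 - 1  # [-1, 1] NCHW, as apply_watermark expects
watermarker = StableDiffusionXLWatermarker()
watermarked = watermarker.apply_watermark(batch)
print(watermarked.shape)  # torch.Size([1, 3, 256, 256]), still in [-1, 1]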
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
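What the lazy module buys the caller, as a short sketch; the checkpoint id is the standard OpenAI release and is assumed here for illustration.

from transformers import CLIPModel, CLIPProcessor  # resolved lazily at first attribute access

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")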
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _A ( lowercase , lowercase = "cpu" , lowercase = None ): """simple docstring""" a =torch.load(_SCREAMING_SNAKE_CASE , map_location=_SCREAMING_SNAKE_CASE ) for k, v in tqdm(state_dict.items() ): if not isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ): raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' ) a =v.half() if save_path is None: # overwrite src_path a =src_path torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": fire.Fire(convert)
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    """Output class for text-to-video pipelines: a list of np.ndarray frames or a torch tensor."""

    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
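A hedged usage sketch for the pipeline exported above; the ModelScope checkpoint id is an assumption for illustration.

import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
video_frames = pipe("a panda playing guitar", num_inference_steps=25).frames  # list of np.ndarray frames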
"""simple docstring""" import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __UpperCamelCase : Union[str, Any] = '''\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } ''' __UpperCamelCase : Any = '''\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve ''' __UpperCamelCase : Dict = ''' Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: "c" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric(\'mauve\') >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): """simple docstring""" def __lowerCAmelCase ( self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/krishnap25/mauve''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Value('''string''' ,id='''sequence''' ), } ) ,codebase_urls=['''https://github.com/krishnap25/mauve'''] ,reference_urls=[ '''https://arxiv.org/abs/2102.01454''', '''https://github.com/krishnap25/mauve''', ] ,) def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : int ,lowercase_ : Optional[int] ,lowercase_ : str=None ,lowercase_ : Optional[int]=None ,lowercase_ : Any=None ,lowercase_ : Optional[Any]=None ,lowercase_ : Dict="auto" ,lowercase_ : List[Any]=-1 ,lowercase_ : Optional[Any]=0.9 ,lowercase_ : Any=5 ,lowercase_ : int=5_0_0 ,lowercase_ : Optional[int]="gpt2-large" ,lowercase_ : Optional[int]=-1 ,lowercase_ : Dict=1_0_2_4 ,lowercase_ : Tuple=2_5 ,lowercase_ : str=5 ,lowercase_ : List[Any]=True ,lowercase_ : Union[str, Any]=2_5 ,): lowerCAmelCase__ : int = compute_mauve( p_text=lowercase_ ,q_text=lowercase_ ,p_features=lowercase_ ,q_features=lowercase_ ,p_tokens=lowercase_ ,q_tokens=lowercase_ ,num_buckets=lowercase_ ,pca_max_data=lowercase_ ,kmeans_explained_var=lowercase_ ,kmeans_num_redo=lowercase_ ,kmeans_max_iter=lowercase_ ,featurize_model_name=lowercase_ ,device_id=lowercase_ ,max_text_length=lowercase_ ,divergence_curve_discretization_size=lowercase_ ,mauve_scaling_factor=lowercase_ ,verbose=lowercase_ ,seed=lowercase_ ,) return out
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml __UpperCamelCase : Any = NewType('''DataClass''', Any) __UpperCamelCase : List[str] = NewType('''DataClassType''', Any) def __SCREAMING_SNAKE_CASE ( A_ ): if isinstance(A_ , A_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : int = {str(A_ ): choice for choice in choices} return lambda A_ : str_to_choice.get(A_ , A_ ) def __SCREAMING_SNAKE_CASE ( *, A_ = None , A_ = None , A_ = dataclasses.MISSING , A_ = dataclasses.MISSING , A_ = None , **A_ , ): if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls lowerCAmelCase__ : Dict = {} if aliases is not None: lowerCAmelCase__ : int = aliases if help is not None: lowerCAmelCase__ : Optional[int] = help return dataclasses.field(metadata=A_ , default=A_ , default_factory=A_ , **A_ ) class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = 42 def __init__( self : Dict ,lowercase_ : Union[DataClassType, Iterable[DataClassType]] ,**lowercase_ : str ): # To make the default appear when using --help if "formatter_class" not in kwargs: lowerCAmelCase__ : Tuple = ArgumentDefaultsHelpFormatter super().__init__(**lowercase_ ) if dataclasses.is_dataclass(lowercase_ ): lowerCAmelCase__ : Tuple = [dataclass_types] lowerCAmelCase__ : List[str] = list(lowercase_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(lowercase_ ) @staticmethod def __lowerCAmelCase ( lowercase_ : ArgumentParser ,lowercase_ : dataclasses.Field ): lowerCAmelCase__ : Dict = F'--{field.name}' lowerCAmelCase__ : List[str] = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type ,lowercase_ ): raise RuntimeError( '''Unresolved type detected, which should have been done with the help of ''' '''`typing.get_type_hints` method by default''' ) lowerCAmelCase__ : List[str] = kwargs.pop('''aliases''' ,[] ) if isinstance(lowercase_ ,lowercase_ ): lowerCAmelCase__ : Optional[Any] = [aliases] lowerCAmelCase__ : Union[str, Any] = getattr(field.type ,'''__origin__''' ,field.type ) if origin_type is Union or (hasattr(lowercase_ ,'''UnionType''' ) and isinstance(lowercase_ ,types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(lowercase_ ) not in field.type.__args__ ): raise ValueError( '''Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because''' ''' the argument parser only supports one type per argument.''' F' Problem encountered in field \'{field.name}\'.' 
) if type(lowercase_ ) not in field.type.__args__: # filter `str` in Union lowerCAmelCase__ : int = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] lowerCAmelCase__ : List[str] = getattr(field.type ,'''__origin__''' ,field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) lowerCAmelCase__ : Optional[int] = ( field.type.__args__[0] if isinstance(lowercase_ ,field.type.__args__[1] ) else field.type.__args__[1] ) lowerCAmelCase__ : Optional[Any] = getattr(field.type ,'''__origin__''' ,field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) lowerCAmelCase__ : List[Any] = {} if origin_type is Literal or (isinstance(field.type ,lowercase_ ) and issubclass(field.type ,lowercase_ )): if origin_type is Literal: lowerCAmelCase__ : Union[str, Any] = field.type.__args__ else: lowerCAmelCase__ : Optional[Any] = [x.value for x in field.type] lowerCAmelCase__ : List[str] = make_choice_type_function(kwargs['''choices'''] ) if field.default is not dataclasses.MISSING: lowerCAmelCase__ : int = field.default else: lowerCAmelCase__ : Any = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument lowerCAmelCase__ : List[Any] = copy(lowercase_ ) # Hack because type=bool in argparse does not behave as we want. lowerCAmelCase__ : Tuple = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. lowerCAmelCase__ : List[Any] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way lowerCAmelCase__ : Tuple = default # This tells argparse we accept 0 or 1 value after --field_name lowerCAmelCase__ : Union[str, Any] = '''?''' # This is the value that will get picked if we do --field_name (without value) lowerCAmelCase__ : Any = True elif isclass(lowercase_ ) and issubclass(lowercase_ ,lowercase_ ): lowerCAmelCase__ : List[str] = field.type.__args__[0] lowerCAmelCase__ : str = '''+''' if field.default_factory is not dataclasses.MISSING: lowerCAmelCase__ : Dict = field.default_factory() elif field.default is dataclasses.MISSING: lowerCAmelCase__ : str = True else: lowerCAmelCase__ : List[Any] = field.type if field.default is not dataclasses.MISSING: lowerCAmelCase__ : str = field.default elif field.default_factory is not dataclasses.MISSING: lowerCAmelCase__ : Any = field.default_factory() else: lowerCAmelCase__ : Optional[Any] = True parser.add_argument(lowercase_ ,*lowercase_ ,**lowercase_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): lowerCAmelCase__ : Optional[Any] = False parser.add_argument(F'--no_{field.name}' ,action='''store_false''' ,dest=field.name ,**lowercase_ ) def __lowerCAmelCase ( self : str ,lowercase_ : DataClassType ): if hasattr(lowercase_ ,'''_argument_group_name''' ): lowerCAmelCase__ : Optional[int] = self.add_argument_group(dtype._argument_group_name ) else: lowerCAmelCase__ : List[str] = self try: lowerCAmelCase__ : Dict[str, type] = get_type_hints(lowercase_ ) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' '''removing line of `from __future__ import annotations` which opts in Postponed ''' '''Evaluation of Annotations (PEP 563)''' ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(lowercase_ ): lowerCAmelCase__ : int = '''.'''.join(map(lowercase_ ,sys.version_info[:3] ) ) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. Try removing ' '''line of `from __future__ import annotations` which opts in union types as ''' '''`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To ''' '''support Python versions that lower than 3.10, you need to use ''' '''`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of ''' '''`X | None`.''' ) from ex raise for field in dataclasses.fields(lowercase_ ): if not field.init: continue lowerCAmelCase__ : Any = type_hints[field.name] self._parse_dataclass_field(lowercase_ ,lowercase_ ) def __lowerCAmelCase ( self : Any ,lowercase_ : Optional[Any]=None ,lowercase_ : str=False ,lowercase_ : str=True ,lowercase_ : Any=None ,lowercase_ : List[str]=None ,): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): lowerCAmelCase__ : int = [] if args_filename: args_files.append(Path(lowercase_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix('''.args''' ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values lowerCAmelCase__ : List[str] = ArgumentParser() args_file_parser.add_argument(lowercase_ ,type=lowercase_ ,action='''append''' ) # Use only remaining args for further parsing (remove the args_file_flag) lowerCAmelCase__ ,lowerCAmelCase__ : List[str] = args_file_parser.parse_known_args(args=lowercase_ ) lowerCAmelCase__ : int = vars(lowercase_ ).get(args_file_flag.lstrip('''-''' ) ,lowercase_ ) if cmd_args_file_paths: args_files.extend([Path(lowercase_ ) for p in cmd_args_file_paths] ) lowerCAmelCase__ : Tuple = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last lowerCAmelCase__ : Dict = file_args + args if args is not None else file_args + sys.argv[1:] lowerCAmelCase__ ,lowerCAmelCase__ : Optional[Any] = self.parse_known_args(args=lowercase_ ) lowerCAmelCase__ : Optional[Any] = [] for dtype in self.dataclass_types: lowerCAmelCase__ : int = {f.name for f in dataclasses.fields(lowercase_ ) if f.init} lowerCAmelCase__ : int = {k: v for k, v in vars(lowercase_ ).items() if k in keys} for k in keys: delattr(lowercase_ ,lowercase_ ) lowerCAmelCase__ : 
Optional[Any] = dtype(**lowercase_ ) outputs.append(lowercase_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(lowercase_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}' ) return (*outputs,) def __lowerCAmelCase ( self : Any ,lowercase_ : Dict[str, Any] ,lowercase_ : bool = False ): lowerCAmelCase__ : List[Any] = set(args.keys() ) lowerCAmelCase__ : Any = [] for dtype in self.dataclass_types: lowerCAmelCase__ : Optional[Any] = {f.name for f in dataclasses.fields(lowercase_ ) if f.init} lowerCAmelCase__ : Union[str, Any] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) lowerCAmelCase__ : Union[str, Any] = dtype(**lowercase_ ) outputs.append(lowercase_ ) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(lowercase_ )}' ) return tuple(lowercase_ ) def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : str ,lowercase_ : bool = False ): with open(Path(lowercase_ ) ,encoding='''utf-8''' ) as open_json_file: lowerCAmelCase__ : Union[str, Any] = json.loads(open_json_file.read() ) lowerCAmelCase__ : List[str] = self.parse_dict(lowercase_ ,allow_extra_keys=lowercase_ ) return tuple(lowercase_ ) def __lowerCAmelCase ( self : Dict ,lowercase_ : str ,lowercase_ : bool = False ): lowerCAmelCase__ : Tuple = self.parse_dict(yaml.safe_load(Path(lowercase_ ).read_text() ) ,allow_extra_keys=lowercase_ ) return tuple(lowercase_ )
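The parser above is easiest to understand from the caller's side. A minimal sketch, assuming only that transformers is installed; the dataclass and its field names are illustrative, not from the source.

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class TrainingConfig:
    model_name: str = field(default="distilbert-base-uncased", metadata={"help": "Checkpoint to load."})
    learning_rate: float = field(default=5e-5, metadata={"help": "Peak learning rate."})
    fp16: bool = field(default=False, metadata={"help": "Use mixed precision."})


parser = HfArgumentParser(TrainingConfig)
# equivalent to: python train.py --learning_rate 3e-5 --fp16
(config,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "3e-5", "--fp16"])
print(config.learning_rate, config.fp16)  # 3e-05 True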
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class _lowerCAmelCase ( lowercase ): """simple docstring""" def __init__( self : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str]=1_3, UpperCAmelCase__ : Union[str, Any]=7, UpperCAmelCase__ : Optional[int]=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]=False, UpperCAmelCase__ : Union[str, Any]=True, UpperCAmelCase__ : Any=9_9, UpperCAmelCase__ : List[Any]=3_2, UpperCAmelCase__ : int=5, UpperCAmelCase__ : Dict=4, UpperCAmelCase__ : Dict=3_7, UpperCAmelCase__ : List[str]="gelu", UpperCAmelCase__ : Optional[int]=0.1, UpperCAmelCase__ : Optional[Any]=0.1, UpperCAmelCase__ : Union[str, Any]=5_1_2, UpperCAmelCase__ : int=1_6, UpperCAmelCase__ : int=2, UpperCAmelCase__ : Optional[Any]=0.02, UpperCAmelCase__ : Optional[int]=3, UpperCAmelCase__ : Tuple=4, UpperCAmelCase__ : List[str]=None, ): __lowercase = parent __lowercase = batch_size __lowercase = seq_length __lowercase = is_training __lowercase = use_input_mask __lowercase = use_token_type_ids __lowercase = use_labels __lowercase = vocab_size __lowercase = hidden_size __lowercase = num_hidden_layers __lowercase = num_attention_heads __lowercase = intermediate_size __lowercase = hidden_act __lowercase = hidden_dropout_prob __lowercase = attention_probs_dropout_prob __lowercase = max_position_embeddings __lowercase = type_vocab_size __lowercase = type_sequence_label_size __lowercase = initializer_range __lowercase = num_labels __lowercase = num_choices __lowercase = scope def _lowercase ( self : Any ): __lowercase = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) __lowercase = None if self.use_input_mask: __lowercase = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase = None __lowercase = None __lowercase = None if self.use_labels: __lowercase = ids_tensor([self.batch_size], self.type_sequence_label_size ) __lowercase = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) __lowercase = ids_tensor([self.batch_size], self.num_choices ) __lowercase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : str ): return DistilBertConfig( vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def _lowercase ( self : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : str ): __lowercase = DistilBertModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase 
= model(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = model(UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any] ): __lowercase = DistilBertForMaskedLM(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Optional[Any] ): __lowercase = DistilBertForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, start_positions=UpperCAmelCase__, end_positions=UpperCAmelCase__ ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple ): __lowercase = self.num_labels __lowercase = DistilBertForSequenceClassification(UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def _lowercase ( self : Any, UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : int ): __lowercase = self.num_labels __lowercase = DistilBertForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Any ): __lowercase = self.num_choices __lowercase = DistilBertForMultipleChoice(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() __lowercase = input_ids.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __lowercase = input_mask.unsqueeze(1 ).expand(-1, self.num_choices, -1 ).contiguous() __lowercase = model( UpperCAmelCase__, attention_mask=UpperCAmelCase__, labels=UpperCAmelCase__, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) ) def _lowercase ( self : Optional[Any] ): __lowercase = self.prepare_config_and_inputs() ((__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase)) = config_and_inputs __lowercase = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( lowercase ,lowercase ,unittest.TestCase ): 
"""simple docstring""" __UpperCAmelCase : int = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) __UpperCAmelCase : List[Any] = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Optional[Any] = True __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : str = True def _lowercase ( self : Tuple ): __lowercase = DistilBertModelTester(self ) __lowercase = ConfigTester(self, config_class=UpperCAmelCase__, dim=3_7 ) def _lowercase ( self : Optional[int] ): self.config_tester.run_common_tests() def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase__ ) def _lowercase ( self : int ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase__ ) def _lowercase ( self : List[str] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase__ ) def _lowercase ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase__ ) def _lowercase ( self : str ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ): __lowercase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase__ ) @slow def _lowercase ( self : List[str] ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase = DistilBertModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @slow @require_torch_gpu def _lowercase ( self : Any ): __lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __lowercase = True __lowercase = model_class(config=UpperCAmelCase__ ) __lowercase = self._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__ ) __lowercase = torch.jit.trace( UpperCAmelCase__, (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCAmelCase__, os.path.join(UpperCAmelCase__, "traced_model.pt" ) ) __lowercase = torch.jit.load(os.path.join(UpperCAmelCase__, "traced_model.pt" ), map_location=UpperCAmelCase__ ) loaded(inputs_dict["input_ids"].to(UpperCAmelCase__ ), inputs_dict["attention_mask"].to(UpperCAmelCase__ ) ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : List[str] ): __lowercase = DistilBertModel.from_pretrained("distilbert-base-uncased" ) __lowercase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) __lowercase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __lowercase = model(UpperCAmelCase__, attention_mask=UpperCAmelCase__ )[0] __lowercase = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape, UpperCAmelCase__ ) __lowercase = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4], UpperCAmelCase__, atol=1E-4 ) )
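Outside the test harness, the classes exercised above are driven like any other checkpoint; a minimal inference sketch using the same checkpoint the integration test pins.

import torch
from transformers import DistilBertModel, DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**inputs).last_hidden_state
print(last_hidden.shape)  # (batch, seq_len, 768)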
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Optional[int] = SpeechTaTokenizer lowerCamelCase_ : int = False lowerCamelCase_ : Dict = True def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing snake_case_ : Tuple = SpeechTaTokenizer(__magic_name__ ) snake_case_ : Any = AddedToken('''<mask>''' , lstrip=__magic_name__ , rstrip=__magic_name__ ) snake_case_ : int = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : Dict = '''this is a test''' snake_case_ : int = '''this is a test''' return input_text, output_text def lowerCamelCase (self , __magic_name__ , __magic_name__=False , __magic_name__=20 , __magic_name__=5 ) -> List[Any]: '''simple docstring''' snake_case_ , snake_case_ : int = self.get_input_output_texts(__magic_name__ ) snake_case_ : Optional[Any] = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ ) snake_case_ : Any = tokenizer.decode(__magic_name__ , clean_up_tokenization_spaces=__magic_name__ ) return text, ids def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = '''<pad>''' snake_case_ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ ) def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(__magic_name__ ) , 81 ) def lowerCamelCase (self ) -> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 79 ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : int = self.get_tokenizers(do_lower_case=__magic_name__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): snake_case_ : int = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ : List[Any] = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] snake_case_ : List[Any] = tokenizer.add_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Optional[Any] = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size + len(__magic_name__ ) ) snake_case_ : 
Union[str, Any] = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ : Union[str, Any] = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} snake_case_ : List[str] = tokenizer.add_special_tokens(__magic_name__ ) snake_case_ : Dict = tokenizer.vocab_size snake_case_ : Dict = len(__magic_name__ ) self.assertNotEqual(__magic_name__ , 0 ) self.assertEqual(__magic_name__ , __magic_name__ ) self.assertEqual(__magic_name__ , len(__magic_name__ ) ) self.assertEqual(__magic_name__ , all_size_a + len(__magic_name__ ) ) snake_case_ : Tuple = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=__magic_name__ ) self.assertGreaterEqual(len(__magic_name__ ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> List[str]: '''simple docstring''' pass def lowerCamelCase (self ) -> int: '''simple docstring''' snake_case_ : Dict = self.get_tokenizer() snake_case_ : Optional[Any] = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(__magic_name__ , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__magic_name__ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) snake_case_ : List[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) snake_case_ : List[str] = tokenizer.convert_tokens_to_ids(__magic_name__ ) # fmt: off self.assertListEqual(__magic_name__ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] ) # fmt: on snake_case_ : int = tokenizer.convert_ids_to_tokens(__magic_name__ ) self.assertListEqual( __magic_name__ , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' snake_case_ : Tuple = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, 
DistilBert, XLNet...) for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off snake_case_ : List[Any] = { '''input_ids''': [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__magic_name__ , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=__magic_name__ , )
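A quick round-trip with the tokenizer under test, outside the harness; this reuses the checkpoint the integration test above pins and assumes sentencepiece is installed.

from transformers import SpeechT5Tokenizer

tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
ids = tokenizer("this is a test").input_ids
print(tokenizer.decode(ids, skip_special_tokens=True))  # "this is a test"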
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """
    Calculate the built-in voltage of a p-n junction diode:
    V_bi = (k_B * T / q) * ln(N_d * N_a / n_i^2)
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
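A worked check of the formula with common silicon numbers (the concentrations are illustrative assumptions, not values from the source): with Nd = Na = 1e17 cm^-3 and ni = 1.5e10 cm^-3, kT/q is about 0.0259 V at 300 K and ln(Nd*Na/ni^2) is about 31.4, giving Vbi of roughly 0.81 V.

# Illustrative check with assumed silicon concentrations.
print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))  # ~0.81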
'''simple docstring''' import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def __lowerCamelCase ( A__ ) -> Any: """simple docstring""" UpperCamelCase , UpperCamelCase = image.size UpperCamelCase , UpperCamelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) UpperCamelCase = np.array(A__ ).astype(np.floataa ) / 255.0 UpperCamelCase = image[None].transpose(0 , 3 , 1 , 2 ) UpperCamelCase = torch.from_numpy(A__ ) return 2.0 * image - 1.0 class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" def __init__( self : str , UpperCamelCase__ : VQModel , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ] , ): """simple docstring""" super().__init__() self.register_modules(vqvae=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ ) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase__ : Union[torch.Tensor, PIL.Image.Image] = None , UpperCamelCase__ : Optional[int] = 1 , UpperCamelCase__ : Optional[int] = 1_0_0 , UpperCamelCase__ : Optional[float] = 0.0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ): """simple docstring""" if isinstance(UpperCamelCase__ , PIL.Image.Image ): UpperCamelCase = 1 elif isinstance(UpperCamelCase__ , torch.Tensor ): UpperCamelCase = image.shape[0] else: raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCamelCase__ )}""" ) if isinstance(UpperCamelCase__ , PIL.Image.Image ): UpperCamelCase = preprocess(UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCamelCase = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCamelCase = next(self.unet.parameters() ).dtype UpperCamelCase = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=self.device , dtype=UpperCamelCase__ ) UpperCamelCase = image.to(device=self.device , dtype=UpperCamelCase__ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device ) UpperCamelCase = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase = {} if accepts_eta: UpperCamelCase = eta for t in self.progress_bar(UpperCamelCase__ ): # concat latents and low resolution image in the channel dimension. 
UpperCamelCase = torch.cat([latents, image] , dim=1 ) UpperCamelCase = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ ) # predict the noise residual UpperCamelCase = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample # decode the image latents with the VQVAE UpperCamelCase = self.vqvae.decode(UpperCamelCase__ ).sample UpperCamelCase = torch.clamp(UpperCamelCase__ , -1.0 , 1.0 ) UpperCamelCase = image / 2 + 0.5 UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(UpperCamelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase__ )
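The structure above (VQModel decoder, UNet2DModel over concatenated latents and a low-resolution image) matches diffusers' LDMSuperResolutionPipeline; assuming that identification, a usage sketch in which the checkpoint id and input file are assumptions for illustration.

from diffusers import LDMSuperResolutionPipeline
from PIL import Image

# "CompVis/ldm-super-resolution-4x-openimages" is an assumed example checkpoint.
pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("input.png").convert("RGB").resize((128, 128))  # hypothetical input file
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")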
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase_ (lowerCamelCase__ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = RobertaTokenizer SCREAMING_SNAKE_CASE : str = RobertaTokenizerFast SCREAMING_SNAKE_CASE : List[str] = True SCREAMING_SNAKE_CASE : Optional[Any] = {'cls_token': '<s>'} def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __lowercase = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] __lowercase = dict(zip(lowercase__ ,range(len(lowercase__ ) ) ) ) __lowercase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] __lowercase = {'''unk_token''': '''<unk>'''} __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase__ ) + '''\n''' ) with open(self.merges_file ,'''w''' ,encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase__ ) ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,**lowercase__ : Dict ): kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,**lowercase__ : Optional[Any] ): kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : Union[str, Any] ): __lowercase = '''lower newer''' __lowercase = '''lower newer''' return input_text, output_text def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) __lowercase = '''lower newer''' __lowercase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] __lowercase = tokenizer.tokenize(lowercase__ ) # , add_prefix_space=True) self.assertListEqual(lowercase__ ,lowercase__ ) __lowercase = tokens + [tokenizer.unk_token] __lowercase = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase__ ) ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): __lowercase = self.get_tokenizer() self.assertListEqual(tokenizer.encode('''Hello world!''' ,add_special_tokens=lowercase__ ) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] ) self.assertListEqual( tokenizer.encode('''Hello world! 
cécé herlolip 418''' ,add_special_tokens=lowercase__ ) ,[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] ,) @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): __lowercase = self.tokenizer_class.from_pretrained('''roberta-base''' ) __lowercase = tokenizer.encode('''sequence builders''' ,add_special_tokens=lowercase__ ) __lowercase = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=lowercase__ ) __lowercase = tokenizer.encode( '''sequence builders''' ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ ) __lowercase = tokenizer.encode( '''sequence builders''' ,'''multi-sequence build''' ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ ) __lowercase = tokenizer.build_inputs_with_special_tokens(lowercase__ ) __lowercase = tokenizer.build_inputs_with_special_tokens(lowercase__ ,lowercase__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = self.get_tokenizer() __lowercase = '''Encode this sequence.''' __lowercase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]] # Testing encoder arguments __lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ ) __lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowercase__ ,lowercase__ ) __lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ,add_prefix_space=lowercase__ ) __lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowercase__ ,lowercase__ ) tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} ) __lowercase = tokenizer.encode(lowercase__ ,add_special_tokens=lowercase__ ) __lowercase = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowercase__ ,lowercase__ ) # Testing spaces after special tokens __lowercase = '''<mask>''' tokenizer.add_special_tokens( {'''mask_token''': AddedToken(lowercase__ ,lstrip=lowercase__ ,rstrip=lowercase__ )} ) # mask token has a left space __lowercase = tokenizer.convert_tokens_to_ids(lowercase__ ) __lowercase = '''Encode <mask> sequence''' __lowercase = '''Encode <mask>sequence''' __lowercase = tokenizer.encode(lowercase__ ) __lowercase = encoded.index(lowercase__ ) __lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowercase__ ,lowercase__ ) __lowercase = tokenizer.encode(lowercase__ ) __lowercase = encoded.index(lowercase__ ) __lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowercase__ ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): pass def SCREAMING_SNAKE_CASE ( self : Any ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): __lowercase = self.rust_tokenizer_class.from_pretrained(lowercase__ ,**lowercase__ ) __lowercase = self.tokenizer_class.from_pretrained(lowercase__ ,**lowercase__ ) __lowercase = '''A, <mask> AllenNLP sentence.''' __lowercase = tokenizer_r.encode_plus(lowercase__ ,add_special_tokens=lowercase__ ,return_token_type_ids=lowercase__ ) __lowercase = tokenizer_p.encode_plus(lowercase__ ,add_special_tokens=lowercase__ ,return_token_type_ids=lowercase__ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) ,sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) ,sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) ,) __lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) __lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] ,[0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] ) self.assertSequenceEqual( lowercase__ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowercase__ ,['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) def SCREAMING_SNAKE_CASE ( self : int ): for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ): __lowercase = self.rust_tokenizer_class.from_pretrained( self.tmpdirname ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) __lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] ,lowercase__ ) self.assertEqual(post_processor_state['''add_prefix_space'''] ,lowercase__ ) self.assertEqual(post_processor_state['''trim_offsets'''] ,lowercase__ ) def SCREAMING_SNAKE_CASE ( self : List[Any] ): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): __lowercase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name` __lowercase = F"{text_of_1_token} {text_of_1_token}" __lowercase = self.rust_tokenizer_class.from_pretrained( lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) ,) __lowercase = self.rust_tokenizer_class.from_pretrained( lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowercase__ ) + 1, len(lowercase__ ) + 1 + len(lowercase__ )) ,) __lowercase = self.rust_tokenizer_class.from_pretrained( lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) ,) __lowercase = self.rust_tokenizer_class.from_pretrained( lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = 
tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(lowercase__ ), len(lowercase__ ) + 1 + len(lowercase__ )) ,) __lowercase = F" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) __lowercase = self.rust_tokenizer_class.from_pretrained( lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(lowercase__ ) + 1, 1 + len(lowercase__ ) + 1 + len(lowercase__ )) ,) __lowercase = self.rust_tokenizer_class.from_pretrained( lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) ,) __lowercase = self.rust_tokenizer_class.from_pretrained( lowercase__ ,use_fast=lowercase__ ,add_prefix_space=lowercase__ ,trim_offsets=lowercase__ ) __lowercase = tokenizer_r(lowercase__ ,return_offsets_mapping=lowercase__ ,add_special_tokens=lowercase__ ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(lowercase__ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(lowercase__ ), 1 + len(lowercase__ ) + 1 + len(lowercase__ )) ,)
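# The mask-token assertions in the test above pin down how RoBERTa keeps the
# space after `<mask>`. The same expectation, reproduced with the public API;
# the printed token sequence is copied from the test's own expected output,
# not re-derived.
from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
ids = tok("A, <mask> AllenNLP sentence.")["input_ids"]
print(tok.convert_ids_to_tokens(ids))
# ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']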
104
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowercase = 16 __lowercase = 32 def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 16 , SCREAMING_SNAKE_CASE = "bert-base-cased" ): '''simple docstring''' __UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE ) __UpperCamelCase :Any = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) __UpperCamelCase :int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset __UpperCamelCase :Tuple = datasets.map( SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=SCREAMING_SNAKE_CASE ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCamelCase :List[str] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
__UpperCamelCase :Union[str, Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE ) __UpperCamelCase :Dict = DataLoader( tokenized_datasets['''validation'''] , shuffle=SCREAMING_SNAKE_CASE , collate_fn=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCamelCase :List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCamelCase :int = config['''lr'''] __UpperCamelCase :str = int(config['''num_epochs'''] ) __UpperCamelCase :Any = int(config['''seed'''] ) __UpperCamelCase :Dict = int(config['''batch_size'''] ) __UpperCamelCase :Optional[Any] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE ) __UpperCamelCase , __UpperCamelCase :Dict = get_dataloaders(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCamelCase :Any = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE ) # Instantiate optimizer __UpperCamelCase :List[str] = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) __UpperCamelCase :Optional[Any] = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE ) if accelerator.state.deepspeed_plugin is not None: __UpperCamelCase :Dict = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: __UpperCamelCase :Dict = 1 __UpperCamelCase :Tuple = (len(SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): __UpperCamelCase :str = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE , ) else: __UpperCamelCase :Dict = DummyScheduler(SCREAMING_SNAKE_CASE , total_num_steps=SCREAMING_SNAKE_CASE , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase :int = accelerator.prepare( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # We need to keep track of how many total steps we have iterated over __UpperCamelCase :List[Any] = 0 # We also need to keep track of the stating epoch so files are named properly __UpperCamelCase :Dict = 0 # Now we train the model __UpperCamelCase :Any = evaluate.load('''glue''' , '''mrpc''' ) __UpperCamelCase :Union[str, Any] = 0 __UpperCamelCase :Optional[int] = {} for epoch in range(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE ): __UpperCamelCase :Optional[Any] = model(**SCREAMING_SNAKE_CASE ) __UpperCamelCase :Tuple = outputs.loss __UpperCamelCase :str = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() __UpperCamelCase :Any = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCamelCase :Any = model(**SCREAMING_SNAKE_CASE ) __UpperCamelCase :Optional[int] = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times __UpperCamelCase , __UpperCamelCase :List[Any] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE ) - 1: __UpperCamelCase :List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen] __UpperCamelCase :Optional[int] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE , references=SCREAMING_SNAKE_CASE , ) __UpperCamelCase :Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , SCREAMING_SNAKE_CASE ) __UpperCamelCase :str = eval_metric['''accuracy'''] if best_performance < eval_metric["accuracy"]: __UpperCamelCase :int = eval_metric['''accuracy'''] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}""" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def lowerCamelCase ( ): '''simple docstring''' __UpperCamelCase :Tuple = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''' , type=SCREAMING_SNAKE_CASE , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=SCREAMING_SNAKE_CASE , ) parser.add_argument( '''--output_dir''' , type=SCREAMING_SNAKE_CASE , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--performance_lower_bound''' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , ) parser.add_argument( '''--num_epochs''' , type=SCREAMING_SNAKE_CASE , default=3 , help='''Number of train epochs.''' , ) __UpperCamelCase :List[str] = parser.parse_args() __UpperCamelCase :Tuple = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
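# Scripts like the one above are normally started through the Accelerate
# launcher rather than plain `python`. A typical invocation, shown as comments
# since it is shell, not Python; the script and config file names are
# placeholders, while the flags are the ones the argparse block defines.
#
#   accelerate launch --config_file deepspeed_config.yaml train_script.py \
#       --model_name_or_path bert-base-cased --output_dir ./out --num_epochs 3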
43
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be "
        "the image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
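# A hedged usage sketch for the tool above: `PipelineTool.__call__` chains
# encode -> forward -> decode, so the instance is called directly. The image
# path and the printed caption are illustrative only.
from PIL import Image

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.jpg"))
print(caption)  # e.g. "two cats sleeping on a couch"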
148
from maths.prime_check import is_prime


def _A(number: int) -> int:
    """Return ``number + 2`` if both ``number`` and ``number + 2`` are prime, else -1."""
    if not isinstance(number, int):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
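# Concrete behavior of the twin-prime helper above:
print(_A(3))   # 5   (3 and 5 are both prime)
print(_A(4))   # -1  (4 is not prime)
print(_A(13))  # -1  (13 is prime, but 15 = 3 * 5 is not)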
148
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__) lowerCAmelCase_ : Optional[Any] = { 'microsoft/swin-tiny-patch4-window7-224': ( 'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json' ), # See all Swin models at https://huggingface.co/models?filter=swin } class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" __a ='swin' __a ={ 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Any , __a : Optional[Any]=2_24 , __a : List[str]=4 , __a : List[Any]=3 , __a : Optional[Any]=96 , __a : str=[2, 2, 6, 2] , __a : Tuple=[3, 6, 12, 24] , __a : List[str]=7 , __a : Tuple=4.0 , __a : Optional[Any]=True , __a : Optional[Any]=0.0 , __a : Any=0.0 , __a : Tuple=0.1 , __a : Tuple="gelu" , __a : Union[str, Any]=False , __a : Optional[int]=0.02 , __a : Tuple=1e-5 , __a : List[str]=32 , __a : int=None , __a : Dict=None , **__a : Any , ): super().__init__(**__a ) _a = image_size _a = patch_size _a = num_channels _a = embed_dim _a = depths _a = len(__a ) _a = num_heads _a = window_size _a = mlp_ratio _a = qkv_bias _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = drop_path_rate _a = hidden_act _a = use_absolute_embeddings _a = layer_norm_eps _a = initializer_range _a = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _a = int(embed_dim * 2 ** (len(__a ) - 1) ) _a = ["stem"] + [f'stage{idx}' for idx in range(1 , len(__a ) + 1 )] _a , _a = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names ) class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ): """simple docstring""" __a =version.parse('1.11' ) @property def UpperCamelCase__ ( self : List[str] ): return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def UpperCamelCase__ ( self : str ): return 1e-4
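# The derived `hidden_size` in the config above can be sanity-checked with the
# defaults; `SwinConfig` is a readable stand-in for the obfuscated class name.
config = SwinConfig()
# hidden_size = embed_dim * 2 ** (len(depths) - 1) = 96 * 2**3
assert config.hidden_size == 768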
63
def solution(length: int = 50) -> int:
    """Count the ways to fill a row of ``length`` unit cells with tiles of
    length 2, 3 and 4, allowing uncovered unit cells."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
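# The recurrence above matches Project Euler 117 (tiles of length 2, 3 and 4
# on a row of unit cells). The problem's worked example is a quick check:
assert solution(5) == 15  # a row of five units can be tiled in exactly 15 ways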
308
0
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar _UpperCamelCase = TypeVar('''T''') class lowercase ( Generic[T] ): '''simple docstring''' def __init__(self , __a = True ) -> None: """simple docstring""" UpperCAmelCase__ = {} # dictionary of lists UpperCAmelCase__ = directed def UpperCamelCase__ (self , __a , __a ) -> GraphAdjacencyList[T]: """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__a ) self.adj_list[destination_vertex].append(__a ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__a ) UpperCAmelCase__ = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(__a ) UpperCAmelCase__ = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: UpperCAmelCase__ = [destination_vertex] UpperCAmelCase__ = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(__a ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(__a ) UpperCAmelCase__ = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: UpperCAmelCase__ = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: UpperCAmelCase__ = [destination_vertex] UpperCAmelCase__ = [] return self def __repr__(self ) -> str: """simple docstring""" return pformat(self.adj_list )
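# Usage sketch. `GraphAdjacencyList` is the name given by the snippet's own
# return annotation; `add_edge` is an assumed readable name for the obfuscated
# edge-insertion method.
graph = GraphAdjacencyList(False)  # False -> undirected
graph.add_edge(1, 2).add_edge(2, 3)  # the method returns self, so calls chain
print(graph)
# {1: [2], 2: [1, 3], 3: [2]}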
357
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase__ = XCLIPTextConfig() # derive patch size from model name UpperCAmelCase__ = model_name.find('patch' ) UpperCAmelCase__ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] ) UpperCAmelCase__ = XCLIPVisionConfig(patch_size=snake_case__ , num_frames=snake_case__ ) if "large" in model_name: UpperCAmelCase__ = 7_68 UpperCAmelCase__ = 30_72 UpperCAmelCase__ = 12 UpperCAmelCase__ = 10_24 UpperCAmelCase__ = 40_96 UpperCAmelCase__ = 16 UpperCAmelCase__ = 24 UpperCAmelCase__ = 7_68 UpperCAmelCase__ = 30_72 if model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ = 3_36 UpperCAmelCase__ = XCLIPConfig.from_text_vision_configs(snake_case__ , snake_case__ ) if "large" in model_name: UpperCAmelCase__ = 7_68 return config def UpperCamelCase_( snake_case__: Any ) -> Tuple: # text encoder if name == "token_embedding.weight": UpperCAmelCase__ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' ) if name == "positional_embedding": UpperCAmelCase__ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "ln_1" in name: UpperCAmelCase__ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: UpperCAmelCase__ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: UpperCAmelCase__ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: UpperCAmelCase__ = name.replace('c_proj' , 'fc2' ) if name.startswith('transformer.resblocks' ): UpperCAmelCase__ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' ) if "attn.out_proj" in name and "message" not in name: UpperCAmelCase__ = name.replace('attn.out_proj' , 'self_attn.out_proj' ) if "ln_final" in name: UpperCAmelCase__ = name.replace('ln_final' , 'text_model.final_layer_norm' ) # visual encoder if name == "visual.class_embedding": UpperCAmelCase__ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' ) if name == "visual.positional_embedding": UpperCAmelCase__ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' ) if name.startswith('visual.transformer.resblocks' ): UpperCAmelCase__ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' ) if "visual.conv1" in name: UpperCAmelCase__ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' ) if "visual.ln_pre" in name: UpperCAmelCase__ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' ) if "visual.ln_post" in name: UpperCAmelCase__ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' ) if "visual.proj" in name: UpperCAmelCase__ = name.replace('visual.proj' , 'visual_projection.weight' ) if "text_projection" in name: UpperCAmelCase__ = name.replace('text_projection' , 'text_projection.weight' ) # things on top if "prompts_visual_proj" in name: UpperCAmelCase__ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' ) if "prompts_visual_ln" in name: UpperCAmelCase__ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' ) # mit if name == "mit.positional_embedding": UpperCAmelCase__ = name.replace('positional' , 'position' ) if 
name.startswith('mit.resblocks' ): UpperCAmelCase__ = name.replace('mit.resblocks' , 'mit.encoder.layers' ) # prompts generator if name.startswith('prompts_generator.norm' ): UpperCAmelCase__ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' ) return name def UpperCamelCase_( snake_case__: Union[str, Any] , snake_case__: List[Any] ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): UpperCAmelCase__ = orig_state_dict.pop(snake_case__ ) if "attn.in_proj" in key: UpperCAmelCase__ = key.split('.' ) if key.startswith('visual' ): UpperCAmelCase__ = key_split[3] UpperCAmelCase__ = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: UpperCAmelCase__ = val[ :dim, : ] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[ -dim:, : ] else: UpperCAmelCase__ = val[ :dim ] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[ -dim: ] else: if "weight" in key: UpperCAmelCase__ = val[ :dim, : ] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[ -dim:, : ] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[-dim:] elif key.startswith('mit' ): UpperCAmelCase__ = key_split[2] UpperCAmelCase__ = config.vision_config.mit_hidden_size if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[dim : dim * 2, :] UpperCAmelCase__ = val[-dim:, :] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[dim : dim * 2] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = key_split[2] UpperCAmelCase__ = config.text_config.hidden_size if "weight" in key: UpperCAmelCase__ = val[:dim, :] UpperCAmelCase__ = val[ dim : dim * 2, : ] UpperCAmelCase__ = val[-dim:, :] else: UpperCAmelCase__ = val[:dim] UpperCAmelCase__ = val[ dim : dim * 2 ] UpperCAmelCase__ = val[-dim:] else: UpperCAmelCase__ = rename_key(snake_case__ ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: UpperCAmelCase__ = val.T UpperCAmelCase__ = val return orig_state_dict def UpperCamelCase_( snake_case__: Tuple ) -> Optional[Any]: if num_frames == 8: UpperCAmelCase__ = 'eating_spaghetti_8_frames.npy' elif num_frames == 16: UpperCAmelCase__ = 'eating_spaghetti.npy' elif num_frames == 32: UpperCAmelCase__ = 'eating_spaghetti_32_frames.npy' UpperCAmelCase__ = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename=snake_case__ , repo_type='dataset' , ) UpperCAmelCase__ = np.load(snake_case__ ) return list(snake_case__ ) def UpperCamelCase_( snake_case__: Tuple , snake_case__: str=None , snake_case__: Union[str, Any]=False ) -> List[Any]: UpperCAmelCase__ = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 
'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } UpperCAmelCase__ = model_to_url[model_name] UpperCAmelCase__ = 8 if "16-frames" in model_name: UpperCAmelCase__ = 16 elif "shot" in model_name: UpperCAmelCase__ = 32 UpperCAmelCase__ = get_xclip_config(snake_case__ , snake_case__ ) UpperCAmelCase__ = XCLIPModel(snake_case__ ) model.eval() if "drive" in checkpoint_url: UpperCAmelCase__ = 'pytorch_model.bin' gdown.cached_download(snake_case__ , snake_case__ , quiet=snake_case__ ) UpperCAmelCase__ = torch.load(snake_case__ , map_location='cpu' )['model'] else: UpperCAmelCase__ = torch.hub.load_state_dict_from_url(snake_case__ )['model'] UpperCAmelCase__ = convert_state_dict(snake_case__ , snake_case__ ) UpperCAmelCase__ = XCLIPModel(snake_case__ ) UpperCAmelCase__ , UpperCAmelCase__ = model.load_state_dict(snake_case__ , strict=snake_case__ ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() UpperCAmelCase__ = 3_36 if model_name == 'xclip-large-patch14-16-frames' else 2_24 UpperCAmelCase__ = VideoMAEImageProcessor(size=snake_case__ ) UpperCAmelCase__ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' ) UpperCAmelCase__ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' ) UpperCAmelCase__ = XCLIPProcessor(image_processor=snake_case__ , tokenizer=snake_case__ ) UpperCAmelCase__ = prepare_video(snake_case__ ) UpperCAmelCase__ = processor( text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=snake_case__ , return_tensors='pt' , padding=snake_case__ ) print('Shape of pixel values:' , inputs.pixel_values.shape ) with torch.no_grad(): UpperCAmelCase__ = model(**snake_case__ ) # Verify outputs UpperCAmelCase__ = outputs.logits_per_video UpperCAmelCase__ = logits_per_video.softmax(dim=1 ) print('Probs:' , snake_case__ ) # kinetics-400 if model_name == "xclip-base-patch32": UpperCAmelCase__ = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] ) elif model_name == "xclip-base-patch32-16-frames": UpperCAmelCase__ = 
torch.tensor([[7.0_999e-04, 9.9_883e-01, 4.5_580e-04]] ) elif model_name == "xclip-base-patch16": UpperCAmelCase__ = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] ) elif model_name == "xclip-base-patch16-16-frames": UpperCAmelCase__ = torch.tensor([[7.6_937e-04, 9.9_728e-01, 1.9_473e-03]] ) elif model_name == "xclip-large-patch14": UpperCAmelCase__ = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] ) elif model_name == "xclip-large-patch14-16-frames": UpperCAmelCase__ = torch.tensor([[3.3_877e-04, 9.9_937e-01, 2.8_888e-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": UpperCAmelCase__ = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": UpperCAmelCase__ = torch.tensor([[3.8_554e-04, 9.9_929e-01, 3.2_754e-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": UpperCAmelCase__ = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": UpperCAmelCase__ = torch.tensor([[7.1_890e-06, 9.9_994e-01, 5.6_559e-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": UpperCAmelCase__ = torch.tensor([[1.0_320e-05, 9.9_993e-01, 6.2_435e-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": UpperCAmelCase__ = torch.tensor([[4.1_377e-06, 9.9_990e-01, 9.8_386e-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": UpperCAmelCase__ = torch.tensor([[4.1_347e-05, 9.9_962e-01, 3.3_411e-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": UpperCAmelCase__ = torch.tensor([[8.5_857e-05, 9.9_928e-01, 6.3_291e-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": UpperCAmelCase__ = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": UpperCAmelCase__ = torch.tensor([[9.8_219e-04, 9.9_593e-01, 3.0_863e-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": UpperCAmelCase__ = torch.tensor([[3.5_082e-04, 9.9_785e-01, 1.7_966e-03]] ) else: raise ValueError(f"Model name {model_name} not supported" ) assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(snake_case__ ) if push_to_hub: print('Pushing model, processor and slow tokenizer files to the hub...' ) model.push_to_hub(snake_case__ , organization='nielsr' ) processor.push_to_hub(snake_case__ , organization='nielsr' ) slow_tokenizer.push_to_hub(snake_case__ , organization='nielsr' ) if __name__ == "__main__": _UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''xclip-base-patch32''', type=str, help='''Name of the model.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _UpperCamelCase = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
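# For reference, the converter above is driven from the command line; the
# flags below are the ones its argparse block defines (the script filename is
# a placeholder). Shown as comments since it is shell, not Python.
#
#   python convert_xclip_checkpoint.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 --push_to_hub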
335
0
'''simple docstring'''
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"""Iterative search: {target} found at positions: {result_ite}""")
        print(f"""Recursive search: {target} found at positions: {result_rec}""")
    else:
        print('''Not found''')
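# Quick check of both search variants on a small sorted list:
data = [1, 3, 5, 7, 9, 11]
assert ite_ternary_search(data, 7) == 3
assert rec_ternary_search(0, len(data) - 1, data, 7) == 3
assert ite_ternary_search(data, 4) == -1  # absent values return -1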
162
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class _UpperCAmelCase ( unittest.TestCase): __a : Tuple = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __snake_case ( self , _A , _A , _A ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : str = hf_hub_download( repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) _UpperCAmelCase : Tuple = VideoClassificationPipeline(model=_A , image_processor=_A , top_k=2 ) _UpperCAmelCase : List[str] = [ example_video_filepath, """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""", ] return video_classifier, examples def __snake_case ( self , _A , _A ) -> Optional[int]: '''simple docstring''' for example in examples: _UpperCAmelCase : str = video_classifier(_A ) self.assertEqual( _A , [ {"""score""": ANY(_A ), """label""": ANY(_A )}, {"""score""": ANY(_A ), """label""": ANY(_A )}, ] , ) @require_torch def __snake_case ( self ) -> List[str]: '''simple docstring''' _UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification""" _UpperCAmelCase : Optional[Any] = VideoMAEFeatureExtractor( size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} ) _UpperCAmelCase : List[str] = pipeline( """video-classification""" , model=_A , feature_extractor=_A , frame_sampling_rate=4 ) _UpperCAmelCase : Union[str, Any] = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) _UpperCAmelCase : Union[str, Any] = video_classifier(_A , top_k=2 ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , ) _UpperCAmelCase : int = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_A , decimals=4 ) , [ [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], ] , ) @require_tf def __snake_case ( self ) -> Any: '''simple docstring''' pass
246
0
'''simple docstring''' import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version __A =get_logger(__name__) class _snake_case : lowerCAmelCase :int = '''dummy_data''' lowerCAmelCase :Optional[Any] = '''datasets''' lowerCAmelCase :Optional[int] = False def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = True , _lowerCamelCase = None , ): UpperCAmelCase__ : Union[str, Any] = 0 UpperCAmelCase__ : int = dataset_name UpperCAmelCase__ : Tuple = cache_dir UpperCAmelCase__ : Any = use_local_dummy_data UpperCAmelCase__ : str = config # download_callbacks take a single url as input UpperCAmelCase__ : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root UpperCAmelCase__ : List[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general UpperCAmelCase__ : str = str(_lowerCamelCase) # to be downloaded UpperCAmelCase__ : int = None UpperCAmelCase__ : List[str] = None @property def snake_case__ ( self): if self._dummy_file is None: UpperCAmelCase__ : List[Any] = self.download_dummy_data() return self._dummy_file @property def snake_case__ ( self): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("""dummy""" , self.config.name , self.version_name) # structure is dummy / version_name return os.path.join("""dummy""" , self.version_name) @property def snake_case__ ( self): return os.path.join(self.dummy_data_folder , """dummy_data.zip""") def snake_case__ ( self): UpperCAmelCase__ : Tuple = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) UpperCAmelCase__ : Optional[int] = cached_path( _lowerCamelCase , cache_dir=self.cache_dir , extract_compressed_file=_lowerCamelCase , force_extract=_lowerCamelCase) return os.path.join(_lowerCamelCase , self.dummy_file_name) @property def snake_case__ ( self): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file) @property def snake_case__ ( self): if self._bucket_url is None: UpperCAmelCase__ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , """/""")) return self._bucket_url @property def snake_case__ ( self): # return full path if its a dir if os.path.isdir(self.dummy_file): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , """/""").split("""/""")[:-1]) def snake_case__ ( self , _lowerCamelCase , *_lowerCamelCase): if self.load_existing_dummy_data: # dummy data is downloaded and tested UpperCAmelCase__ : Optional[int] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned UpperCAmelCase__ : Optional[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(_lowerCamelCase , _lowerCamelCase): return self.create_dummy_data_dict(_lowerCamelCase , _lowerCamelCase) elif isinstance(_lowerCamelCase , (list, tuple)): return self.create_dummy_data_list(_lowerCamelCase , _lowerCamelCase) else: return self.create_dummy_data_single(_lowerCamelCase , _lowerCamelCase) def snake_case__ ( self , _lowerCamelCase , *_lowerCamelCase): return self.download_and_extract(_lowerCamelCase) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): return self.download_and_extract(_lowerCamelCase) def snake_case__ ( self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase): return path def snake_case__ ( self): return {} def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : List[Any] = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(_lowerCamelCase , _lowerCamelCase): for single_url in single_urls: download_callback(_lowerCamelCase) else: UpperCAmelCase__ : Any = single_urls download_callback(_lowerCamelCase) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(_lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : List[Any] = [os.path.join(_lowerCamelCase , urllib.parse.quote_plus(Path(_lowerCamelCase).name)) for x in single_urls] else: UpperCAmelCase__ : Optional[Any] = single_urls UpperCAmelCase__ : int = os.path.join(_lowerCamelCase , urllib.parse.quote_plus(Path(_lowerCamelCase).name)) UpperCAmelCase__ : str = value # make sure that values are unique if all(isinstance(_lowerCamelCase , _lowerCamelCase) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len( dummy_data_dict.values()): # append key to value to make its name unique UpperCAmelCase__ : Optional[int] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Optional[Any] = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one UpperCAmelCase__ : Optional[int] = all(bool(re.findall("""[0-9]{3,}-of-[0-9]{3,}""" , _lowerCamelCase)) for url in data_url) UpperCAmelCase__ : Union[str, Any] = all( url.startswith("""https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed""") for url in data_url) if data_url and (is_tf_records or is_pubmed_records): UpperCAmelCase__ : Optional[Any] = [data_url[0]] * len(_lowerCamelCase) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(_lowerCamelCase) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase__ : Dict = os.path.join(_lowerCamelCase , urllib.parse.quote_plus(single_url.split("""/""")[-1])) dummy_data_list.append(_lowerCamelCase) return dummy_data_list def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase): for download_callback in self.download_callbacks: 
download_callback(_lowerCamelCase) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase__ : List[str] = os.path.join(_lowerCamelCase , urllib.parse.quote_plus(data_url.split("""/""")[-1])) if os.path.exists(_lowerCamelCase) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def snake_case__ ( self): pass def snake_case__ ( self): pass def snake_case__ ( self , _lowerCamelCase): def _iter_archive_members(_lowerCamelCase): # this preserves the order of the members inside the ZIP archive UpperCAmelCase__ : Optional[int] = Path(self.dummy_file).parent UpperCAmelCase__ : List[Any] = path.relative_to(_lowerCamelCase) with ZipFile(self.local_path_to_dummy_data) as zip_file: UpperCAmelCase__ : Dict = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix()): yield dummy_parent_path.joinpath(_lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = Path(_lowerCamelCase) UpperCAmelCase__ : Dict = _iter_archive_members(_lowerCamelCase) if self.use_local_dummy_data else path.rglob("""*""") for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((""".""", """__""")): yield file_path.relative_to(_lowerCamelCase).as_posix(), file_path.open("""rb""") def snake_case__ ( self , _lowerCamelCase): if not isinstance(_lowerCamelCase , _lowerCamelCase): UpperCAmelCase__ : Optional[Any] = [paths] for path in paths: if os.path.isfile(_lowerCamelCase): if os.path.basename(_lowerCamelCase).startswith((""".""", """__""")): return yield path else: for dirpath, dirnames, filenames in os.walk(_lowerCamelCase): if os.path.basename(_lowerCamelCase).startswith((""".""", """__""")): continue dirnames.sort() for filename in sorted(_lowerCamelCase): if filename.startswith((""".""", """__""")): continue yield os.path.join(_lowerCamelCase , _lowerCamelCase)
354
'''simple docstring'''


def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count values base**power whose decimal length equals the power,
    for bases in [1, max_base) and powers in [1, max_power)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"""{solution(10, 22) = }""")
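# With the defaults this reproduces Project Euler 63 ("powerful digit counts"):
# 9**21 has 21 digits while 10**n always has n + 1 digits, so the ranges above
# cover every qualifying power.
assert solution() == 49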
283
0
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
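# Smoke test of the restored entry point:
assert sort([103.0, 4.0, -9.0, 4.0]) == [-9.0, 4.0, 4.0, 103.0]
assert sort([]) == []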
15
"""simple docstring""" from typing import Callable, Dict, Optional, Tuple import torch from torch import nn from torch.distributions import ( AffineTransform, Distribution, Independent, NegativeBinomial, Normal, StudentT, TransformedDistribution, ) class _UpperCAmelCase ( lowercase_ ): def __init__( self :int , __UpperCamelCase :Distribution , __UpperCamelCase :Dict=None , __UpperCamelCase :Optional[int]=None , __UpperCamelCase :List[str]=0 ): A = 1.0 if scale is None else scale A = 0.0 if loc is None else loc super().__init__(__UpperCamelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__UpperCamelCase )] ) @property def lowerCamelCase ( self :Any ): return self.base_dist.mean * self.scale + self.loc @property def lowerCamelCase ( self :Optional[int] ): return self.base_dist.variance * self.scale**2 @property def lowerCamelCase ( self :Dict ): return self.variance.sqrt() class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int , __UpperCamelCase :Dict[str, int] , __UpperCamelCase :Callable[..., Tuple[torch.Tensor]] , **__UpperCamelCase :str ): super().__init__(**__UpperCamelCase ) A = args_dim A = nn.ModuleList([nn.Linear(__UpperCamelCase , __UpperCamelCase ) for dim in args_dim.values()] ) A = domain_map def lowerCamelCase ( self :int , __UpperCamelCase :torch.Tensor ): A = [proj(__UpperCamelCase ) for proj in self.proj] return self.domain_map(*__UpperCamelCase ) class _UpperCAmelCase ( nn.Module ): def __init__( self :Dict , __UpperCamelCase :int ): super().__init__() A = function def lowerCamelCase ( self :List[str] , __UpperCamelCase :Any , *__UpperCamelCase :Any ): return self.function(__UpperCamelCase , *__UpperCamelCase ) class _UpperCAmelCase : UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 def __init__( self :Any , __UpperCamelCase :int = 1 ): A = dim A = {k: dim * self.args_dim[k] for k in self.args_dim} def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Dict ): if self.dim == 1: return self.distribution_class(*__UpperCamelCase ) else: return Independent(self.distribution_class(*__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :int , __UpperCamelCase :List[str] , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None , ): A = self._base_distribution(__UpperCamelCase ) if loc is None and scale is None: return distr else: return AffineTransformed(__UpperCamelCase , loc=__UpperCamelCase , scale=__UpperCamelCase , event_dim=self.event_dim ) @property def lowerCamelCase ( self :List[Any] ): return () if self.dim == 1 else (self.dim,) @property def lowerCamelCase ( self :Tuple ): return len(self.event_shape ) @property def lowerCamelCase ( self :int ): return 0.0 def lowerCamelCase ( self :str , __UpperCamelCase :int ): return ParameterProjection( in_features=__UpperCamelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , ) def lowerCamelCase ( self :List[Any] , *__UpperCamelCase :torch.Tensor ): raise NotImplementedError() @staticmethod def lowerCamelCase ( __UpperCamelCase :torch.Tensor ): return (x + torch.sqrt(torch.square(__UpperCamelCase ) + 4.0 )) / 2.0 class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"df": 1, "loc": 1, "scale": 1} UpperCamelCase = StudentT @classmethod def lowerCamelCase ( cls :List[str] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) A = 2.0 + cls.squareplus(__UpperCamelCase ) return 
df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"loc": 1, "scale": 1} UpperCamelCase = Normal @classmethod def lowerCamelCase ( cls :List[Any] , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ).clamp_min(torch.finfo(scale.dtype ).eps ) return loc.squeeze(-1 ), scale.squeeze(-1 ) class _UpperCAmelCase ( lowercase_ ): UpperCamelCase = {"total_count": 1, "logits": 1} UpperCamelCase = NegativeBinomial @classmethod def lowerCamelCase ( cls :str , __UpperCamelCase :torch.Tensor , __UpperCamelCase :torch.Tensor ): A = cls.squareplus(__UpperCamelCase ) return total_count.squeeze(-1 ), logits.squeeze(-1 ) def lowerCamelCase ( self :Tuple , __UpperCamelCase :List[str] ): A, A = distr_args if self.dim == 1: return self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) else: return Independent(self.distribution_class(total_count=__UpperCamelCase , logits=__UpperCamelCase ) , 1 ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :str , __UpperCamelCase :Optional[torch.Tensor] = None , __UpperCamelCase :Optional[torch.Tensor] = None ): A, A = distr_args if scale is not None: # See scaling property of Gamma. logits += scale.log() return self._base_distribution((total_count, logits) )
292
0
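The classes above are the distribution-output heads used by transformers' time-series models (an affine-transformed base distribution, a per-parameter linear projection, and StudentT/Normal/NegativeBinomial outputs). A minimal self-contained sketch of the Student-t head follows; the layer names and the 1e-6 scale floor are illustrative (the sample uses the dtype eps instead):

import torch
from torch import nn
from torch.distributions import StudentT


def squareplus(x: torch.Tensor) -> torch.Tensor:
    # smooth positivity map, matching the static helper above
    return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


in_features = 32
proj_df = nn.Linear(in_features, 1)     # one linear projection per parameter
proj_loc = nn.Linear(in_features, 1)
proj_scale = nn.Linear(in_features, 1)

features = torch.randn(8, in_features)  # (batch, hidden)
df = 2.0 + squareplus(proj_df(features)).squeeze(-1)
loc = proj_loc(features).squeeze(-1)
scale = squareplus(proj_scale(features)).squeeze(-1).clamp_min(1e-6)

dist = StudentT(df=df, loc=loc, scale=scale)
loss = -dist.log_prob(torch.randn(8)).mean()  # negative log-likelihood for training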
'''simple docstring''' import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def UpperCAmelCase_ (__a : Any , __a : str , __a : List[Any] ): """simple docstring""" _a : str = LxmertConfig.from_json_file(__a ) print(f"""Building PyTorch model from configuration: {config}""" ) _a : Union[str, Any] = LxmertForPreTraining(__a ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(__a , __a , __a ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __a ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __lowerCAmelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
5
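The script is driven from the CLI, but the conversion function can also be called directly from the script's namespace; the paths below are placeholders:

# hypothetical paths -- point these at a real TF checkpoint and config
convert_tf_checkpoint_to_pytorch(
    "/ckpts/lxmert/model.ckpt",          # --tf_checkpoint_path
    "/ckpts/lxmert/config.json",         # --config_file
    "/ckpts/lxmert/pytorch_model.bin",   # --pytorch_dump_path
)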
'''simple docstring''' import comet # From: unbabel-comet import torch import datasets __lowerCAmelCase = datasets.logging.get_logger(__name__) __lowerCAmelCase = """\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel's Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = \"{COMET}: A Neural Framework for {MT} Evaluation\", author = \"Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon\", booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\", month = nov, year = \"2020\", address = \"Online\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\", pages = \"2685--2702\", } """ __lowerCAmelCase = """\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. """ __lowerCAmelCase = """ COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric('comet') >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"] >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"] >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results[\"scores\"]]) [0.19, 0.92] """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__ ( datasets.Metric ): """simple docstring""" def __lowercase ( self : Optional[int] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,homepage='https://unbabel.github.io/COMET/html/index.html' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'sources': datasets.Value('string' ,id='sequence' ), 'predictions': datasets.Value('string' ,id='sequence' ), 'references': datasets.Value('string' ,id='sequence' ), } ) ,codebase_urls=['https://github.com/Unbabel/COMET'] ,reference_urls=[ 'https://github.com/Unbabel/COMET', 'https://www.aclweb.org/anthology/2020.emnlp-main.213/', 'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6', ] ,) def __lowercase ( self : int ,_a : int ): '''simple docstring''' if self.config_name == "default": _a : List[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) ) else: _a : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def __lowercase ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Optional[Any] ,_a : List[str]=None ,_a : Tuple=False ): '''simple docstring''' if gpus is None: _a : str = 1 if torch.cuda.is_available() else 0 _a : Optional[Any] = {'src': sources, 'mt': predictions, 'ref': references} _a : Optional[Any] = [dict(zip(_a ,_a ) ) for t in zip(*data.values() )] _a, _a : Tuple = self.scorer.predict(_a ,gpus=_a ,progress_bar=_a ) return {"mean_score": mean_score, "scores": scores}
5
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _snake_case : Any = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Any = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Dict = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Tuple = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys _snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
292
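The _LazyModule used above defers heavy imports until an attribute is first touched. A schematic, runnable miniature of the same idea (the real implementation resolves relative submodules of the package; this one maps attributes to absolute stdlib modules purely for demonstration):

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes from backing modules only on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> module that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


lazy = LazyModule("fake_pkg", {"json": ["dumps"], "math": ["sqrt"]})
print(lazy.sqrt(9.0))        # 3.0 -- math is imported only here
print(lazy.dumps({"a": 1}))  # {"a": 1}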
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
174
0
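Sanity checks for the corrected solution (F(12) = 144 is the first three-digit Fibonacci term, and the first term with 1000 digits is the 4782nd, the known Project Euler 25 answer):

print(solution(3))  # 12
print(solution())   # 4782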
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer lowercase =logging.get_logger(__name__) lowercase ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all MVP models at https://huggingface.co/models?filter=mvp lowercase ={ 'vocab_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json', }, 'added_tokens.json': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json', }, 'merges_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt', }, 'tokenizer_file': { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json', }, } lowercase ={ 'RUCAIBox/mvp': 1024, } class __magic_name__ ( a__ ): UpperCAmelCase =VOCAB_FILES_NAMES UpperCAmelCase =PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase =["input_ids", "attention_mask"] UpperCAmelCase =MvpTokenizer def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ) -> Optional[Any]: '''simple docstring''' super().__init__( snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , ) _UpperCAmelCase : str =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , snake_case) != add_prefix_space: _UpperCAmelCase : List[str] =getattr(snake_case , pre_tok_state.pop('type')) _UpperCAmelCase : Dict =add_prefix_space _UpperCAmelCase : str =pre_tok_class(**snake_case) _UpperCAmelCase : Union[str, Any] =add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _UpperCAmelCase : int ="post_processor" _UpperCAmelCase : List[str] =getattr(self.backend_tokenizer , snake_case , snake_case) if tokenizer_component_instance: _UpperCAmelCase : Union[str, Any] =json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _UpperCAmelCase : List[str] =tuple(state['sep']) if "cls" in state: _UpperCAmelCase : Dict =tuple(state['cls']) _UpperCAmelCase : str =False if state.get('add_prefix_space' , snake_case) != add_prefix_space: _UpperCAmelCase : List[str] =add_prefix_space _UpperCAmelCase : Optional[Any] =True if state.get('trim_offsets' , snake_case) != trim_offsets: _UpperCAmelCase : Dict =trim_offsets _UpperCAmelCase : Tuple =True if changes_to_apply: _UpperCAmelCase : List[Any] =getattr(snake_case , state.pop('type')) _UpperCAmelCase : Union[str, Any] =component_class(**snake_case) setattr(self.backend_tokenizer , snake_case , snake_case) @property def lowerCAmelCase ( self) -> int: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return 
str(self._mask_token) @mask_token.setter def lowerCAmelCase ( self , snake_case) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : Dict =AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case) if isinstance(snake_case , snake_case) else value _UpperCAmelCase : List[str] =value def lowerCAmelCase ( self , *snake_case , **snake_case) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : List[str] =kwargs.get('is_split_into_words' , snake_case) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*snake_case , **snake_case) def lowerCAmelCase ( self , *snake_case , **snake_case) -> str: '''simple docstring''' _UpperCAmelCase : Optional[Any] =kwargs.get('is_split_into_words' , snake_case) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " 'to use it with pretokenized inputs.') return super()._encode_plus(*snake_case , **snake_case) def lowerCAmelCase ( self , snake_case , snake_case = None) -> Tuple: '''simple docstring''' _UpperCAmelCase : Tuple =self._tokenizer.model.save(snake_case , name=snake_case) return tuple(snake_case) def lowerCAmelCase ( self , snake_case , snake_case=None) -> int: '''simple docstring''' _UpperCAmelCase : Dict =[self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCAmelCase ( self , snake_case , snake_case = None) -> int: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =[self.sep_token_id] _UpperCAmelCase : str =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
370
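This appears to be transformers' MvpTokenizerFast; assuming that class, a typical round-trip looks like the sketch below (it downloads the RUCAIBox/mvp files referenced in the vocab maps on first use):

from transformers import MvpTokenizerFast

tokenizer = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
enc = tokenizer("Summarize: the weather was lovely.", return_tensors="pt")
print(enc["input_ids"].shape)
print(tokenizer.decode(enc["input_ids"][0], skip_special_tokens=True))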
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore lowercase ='\nHuman: <<task>>\n\nAssistant: ' lowercase ='huggingface-tools/default-prompts' lowercase ={'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'} def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int="run" ): '''simple docstring''' if prompt_or_repo_id is None: _UpperCAmelCase : List[str] =DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search('\\s' , __lowerCamelCase ) is not None: return prompt_or_repo_id _UpperCAmelCase : Dict =cached_file( __lowerCamelCase , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} ) with open(__lowerCamelCase , 'r' , encoding='utf-8' ) as f: return f.read()
242
0
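In transformers this helper ships as download_prompt; assuming that name and signature, the two branches look like this:

# a literal prompt (it contains whitespace) is returned unchanged
text = download_prompt("Human: <<task>>\n\nAssistant: ", "my-agent")

# a repo id (no whitespace) is resolved on the hub and read from the cached file
template = download_prompt("huggingface-tools/default-prompts", "my-agent", mode="chat")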
"""simple docstring""" import math import random from typing import Any from .hill_climbing import SearchProblem def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = math.inf , __UpperCamelCase = -math.inf , __UpperCamelCase = math.inf , __UpperCamelCase = -math.inf , __UpperCamelCase = False , __UpperCamelCase = 1_0_0 , __UpperCamelCase = 0.01 , __UpperCamelCase = 1 , ): """simple docstring""" __A = False __A = search_prob __A = start_temperate __A = [] __A = 0 __A = None while not search_end: __A = current_state.score() if best_state is None or current_score > best_state.score(): __A = current_state scores.append(__UpperCamelCase ) iterations += 1 __A = None __A = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to __A = random.randint(0 , len(__UpperCamelCase ) - 1 ) # picking a random neighbor __A = neighbors.pop(__UpperCamelCase ) __A = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: __A = change * -1 # in case we are finding minimum if change > 0: # improves the solution __A = picked_neighbor else: __A = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability __A = picked_neighbor __A = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor __A = True else: __A = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(__UpperCamelCase ) , __UpperCamelCase ) plt.xlabel('''Iterations''' ) plt.ylabel('''Function values''' ) plt.show() return best_state if __name__ == "__main__": def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) lowercase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) # starting the problem with initial coordinates (12, 47) lowercase_ = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( 'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 ' F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}''' ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" return (3 * x**2) - (6 * y) lowercase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing(prob, find_max=False, visualization=True) print( 'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' F'''{local_min.score()}''' ) lowercase_ = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) lowercase_ = simulated_annealing(prob, find_max=True, visualization=True) print( 'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: ' F'''{local_min.score()}''' )
266
"""simple docstring""" import importlib import inspect import json import os import re import shutil import sys from pathlib import Path from typing import Dict, Optional, Union from urllib import request from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info from packaging import version from .. import __version__ from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging lowercase_ = ( 'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py' ) lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name def lowerCAmelCase ( ): """simple docstring""" __A = '''https://pypi.org/pypi/diffusers/json''' __A = json.loads(request.urlopen(__UpperCamelCase ).read() )['''releases'''].keys() return sorted(__UpperCamelCase , key=lambda __UpperCamelCase : version.Version(__UpperCamelCase ) ) def lowerCAmelCase ( ): """simple docstring""" if HF_MODULES_CACHE in sys.path: return sys.path.append(__UpperCamelCase ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = Path(__UpperCamelCase ) / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" init_hf_modules() __A = Path(__UpperCamelCase ) / name # If the parent module does not exist yet, recursively create it. if not dynamic_module_path.parent.exists(): create_dynamic_module(dynamic_module_path.parent ) os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) __A = dynamic_module_path / '''__init__.py''' if not init_path.exists(): init_path.touch() def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import .xxx` __A = re.findall('''^\s*import\s+\.(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from .xxx import yyy` relative_imports += re.findall('''^\s*from\s+\.(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Unique-ify return list(set(__UpperCamelCase ) ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" __A = False __A = [module_file] __A = [] # Let's recurse through all relative imports while not no_change: __A = [] for f in files_to_check: new_imports.extend(get_relative_imports(__UpperCamelCase ) ) __A = Path(__UpperCamelCase ).parent __A = [str(module_path / m ) for m in new_imports] __A = [f for f in new_import_files if f not in all_relative_imports] __A = [f'{f}.py' for f in new_import_files] __A = len(__UpperCamelCase ) == 0 all_relative_imports.extend(__UpperCamelCase ) return all_relative_imports def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as f: __A = f.read() # Imports of the form `import xxx` __A = re.findall('''^\s*import\s+(\S+)\s*$''' , __UpperCamelCase , flags=re.MULTILINE ) # Imports of the form `from xxx import yyy` imports += re.findall('''^\s*from\s+(\S+)\s+import''' , __UpperCamelCase , flags=re.MULTILINE ) # Only keep the top-level module __A = [imp.split('''.''' )[0] for imp in imports if not imp.startswith('''.''' )] # Unique-ify and test we got them all __A = list(set(__UpperCamelCase ) ) __A = [] for imp in imports: try: importlib.import_module(__UpperCamelCase ) except ImportError: missing_packages.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: raise ImportError( '''This modeling file requires the following packages that were not found in your environment: ''' f'{", 
".join(__UpperCamelCase )}. Run `pip install {" ".join(__UpperCamelCase )}`' ) return get_relative_imports(__UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): """simple docstring""" __A = module_path.replace(os.path.sep , '''.''' ) __A = importlib.import_module(__UpperCamelCase ) if class_name is None: return find_pipeline_class(__UpperCamelCase ) return getattr(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase ): """simple docstring""" from ..pipelines import DiffusionPipeline __A = dict(inspect.getmembers(__UpperCamelCase , inspect.isclass ) ) __A = None for cls_name, cls in cls_members.items(): if ( cls_name != DiffusionPipeline.__name__ and issubclass(cls , __UpperCamelCase ) and cls.__module__.split('''.''' )[0] != "diffusers" ): if pipeline_class is not None: raise ValueError( f'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:' f' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in' f' {loaded_module}.' ) __A = cls return pipeline_class def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , ): """simple docstring""" __A = str(__UpperCamelCase ) __A = os.path.join(__UpperCamelCase , __UpperCamelCase ) if os.path.isfile(__UpperCamelCase ): __A = module_file_or_url __A = '''local''' elif pretrained_model_name_or_path.count('''/''' ) == 0: __A = get_diffusers_versions() # cut ".dev0" __A = '''v''' + '''.'''.join(__version__.split('''.''' )[:3] ) # retrieve github version that matches if revision is None: __A = latest_version if latest_version[1:] in available_versions else '''main''' logger.info(f'Defaulting to latest_version: {revision}.' ) elif revision in available_versions: __A = f'v{revision}' elif revision == "main": __A = revision else: raise ValueError( f'`custom_revision`: {revision} does not exist. Please make sure to choose one of' f' {", ".join(available_versions + ["main"] )}.' ) # community pipeline on GitHub __A = COMMUNITY_PIPELINES_URL.format(revision=__UpperCamelCase , pipeline=__UpperCamelCase ) try: __A = cached_download( __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = '''git''' __A = pretrained_model_name_or_path + '''.py''' except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise else: try: # Load from URL or cache if already cached __A = hf_hub_download( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , ) __A = os.path.join('''local''' , '''--'''.join(pretrained_model_name_or_path.split('''/''' ) ) ) except EnvironmentError: logger.error(f'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' ) raise # Check we have all the requirements in our environment __A = check_imports(__UpperCamelCase ) # Now we move the module inside our cached dynamic modules. 
__A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule create_dynamic_module(__UpperCamelCase ) __A = Path(__UpperCamelCase ) / full_submodule if submodule == "local" or submodule == "git": # We always copy local files (we could hash the file to see if there was a change, and give them the name of # that hash, to only copy when there is a modification but it seems overkill for now). # The only reason we do the copy is to avoid putting too many folders in sys.path. shutil.copy(__UpperCamelCase , submodule_path / module_file ) for module_needed in modules_needed: __A = f'{module_needed}.py' shutil.copy(os.path.join(__UpperCamelCase , __UpperCamelCase ) , submodule_path / module_needed ) else: # Get the commit hash # TODO: we will get this info in the etag soon, so retrieve it from there and not here. if isinstance(__UpperCamelCase , __UpperCamelCase ): __A = use_auth_token elif use_auth_token is True: __A = HfFolder.get_token() else: __A = None __A = model_info(__UpperCamelCase , revision=__UpperCamelCase , token=__UpperCamelCase ).sha # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the # benefit of versioning. __A = submodule_path / commit_hash __A = full_submodule + os.path.sep + commit_hash create_dynamic_module(__UpperCamelCase ) if not (submodule_path / module_file).exists(): shutil.copy(__UpperCamelCase , submodule_path / module_file ) # Make sure we also have every file with relative for module_needed in modules_needed: if not (submodule_path / module_needed).exists(): get_cached_module_file( __UpperCamelCase , f'{module_needed}.py' , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return os.path.join(__UpperCamelCase , __UpperCamelCase ) def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , **__UpperCamelCase , ): """simple docstring""" __A = get_cached_module_file( __UpperCamelCase , __UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , resume_download=__UpperCamelCase , proxies=__UpperCamelCase , use_auth_token=__UpperCamelCase , revision=__UpperCamelCase , local_files_only=__UpperCamelCase , ) return get_class_in_module(__UpperCamelCase , final_module.replace('''.py''' , '''''' ) )
266
1
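The relative-import scan above is purely regex based; a small demonstration of what it extracts (the file contents here are illustrative):

import re

source = (
    "import torch\n"
    "from .unet import UNet\n"
    "from .scheduling_ddim import DDIMScheduler\n"
)
relative = re.findall(r"^\s*import\s+\.(\S+)\s*$", source, flags=re.MULTILINE)
relative += re.findall(r"^\s*from\s+\.(\S+)\s+import", source, flags=re.MULTILINE)
print(sorted(set(relative)))  # ['scheduling_ddim', 'unet']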
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch __lowercase = '''sshleifer/bart-tiny-random''' __lowercase = '''patrickvonplaten/t5-tiny-random''' @require_torch class _lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase_ ( self : Any ) -> Optional[int]: '''simple docstring''' return AutoConfig.from_pretrained(__lowerCAmelCase ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase , *__UpperCamelCase =create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.num_hidden_layers , 1 ) def UpperCAmelCase_ ( self : Any ) -> Tuple: '''simple docstring''' __UpperCamelCase , *__UpperCamelCase =create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase ) def UpperCAmelCase_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __UpperCamelCase , *__UpperCamelCase =create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=__lowerCAmelCase ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers ) def UpperCAmelCase_ ( self : int ) -> Dict: '''simple docstring''' __UpperCamelCase , *__UpperCamelCase =create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=1 , d=1 ) self.assertEqual(student.config.encoder_layers , 1 ) self.assertEqual(student.config.decoder_layers , 1 ) def UpperCAmelCase_ ( self : List[str] ) -> List[Any]: '''simple docstring''' with self.assertRaises(__lowerCAmelCase ): create_student_by_copying_alternating_layers(__lowerCAmelCase , tempfile.mkdtemp() , e=__lowerCAmelCase , d=__lowerCAmelCase )
358
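The helper under test copies alternating teacher layers into a smaller student; a direct call mirroring the assertions above:

import tempfile

from make_student import create_student_by_copying_alternating_layers

student, *_ = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random",  # teacher checkpoint
    tempfile.mkdtemp(),            # where the student is saved
    e=1,                           # encoder layers to keep
    d=1,                           # decoder layers to keep
)
print(student.config.encoder_layers, student.config.decoder_layers)  # 1 1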
"""simple docstring""" def lowerCAmelCase (__UpperCamelCase : int = 3 , __UpperCamelCase : int = 7 , __UpperCamelCase : int = 1_0_0_0_0_0_0 ): """simple docstring""" __UpperCamelCase =0 __UpperCamelCase =1 for current_denominator in range(1 , limit + 1 ): __UpperCamelCase =current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: __UpperCamelCase =current_numerator __UpperCamelCase =current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_000_000))
85
0
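Quick checks (for denominators up to 8, the fraction immediately left of 3/7 is 2/5; the full-limit result is the known Project Euler 71 answer):

print(solution(3, 7, 8))  # 2
print(solution())         # 428570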
import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A : Dict = logging.get_logger(__name__) A : Optional[Any] = { '''b0''': efficientnet.EfficientNetBa, '''b1''': efficientnet.EfficientNetBa, '''b2''': efficientnet.EfficientNetBa, '''b3''': efficientnet.EfficientNetBa, '''b4''': efficientnet.EfficientNetBa, '''b5''': efficientnet.EfficientNetBa, '''b6''': efficientnet.EfficientNetBa, '''b7''': efficientnet.EfficientNetBa, } A : Dict = { '''b0''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.0, '''image_size''': 2_2_4, '''dropout_rate''': 0.2, '''dw_padding''': [], }, '''b1''': { '''hidden_dim''': 1_2_8_0, '''width_coef''': 1.0, '''depth_coef''': 1.1, '''image_size''': 2_4_0, '''dropout_rate''': 0.2, '''dw_padding''': [1_6], }, '''b2''': { '''hidden_dim''': 1_4_0_8, '''width_coef''': 1.1, '''depth_coef''': 1.2, '''image_size''': 2_6_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 8, 1_6], }, '''b3''': { '''hidden_dim''': 1_5_3_6, '''width_coef''': 1.2, '''depth_coef''': 1.4, '''image_size''': 3_0_0, '''dropout_rate''': 0.3, '''dw_padding''': [5, 1_8], }, '''b4''': { '''hidden_dim''': 1_7_9_2, '''width_coef''': 1.4, '''depth_coef''': 1.8, '''image_size''': 3_8_0, '''dropout_rate''': 0.4, '''dw_padding''': [6], }, '''b5''': { '''hidden_dim''': 2_0_4_8, '''width_coef''': 1.6, '''depth_coef''': 2.2, '''image_size''': 4_5_6, '''dropout_rate''': 0.4, '''dw_padding''': [1_3, 2_7], }, '''b6''': { '''hidden_dim''': 2_3_0_4, '''width_coef''': 1.8, '''depth_coef''': 2.6, '''image_size''': 5_2_8, '''dropout_rate''': 0.5, '''dw_padding''': [3_1], }, '''b7''': { '''hidden_dim''': 2_5_6_0, '''width_coef''': 2.0, '''depth_coef''': 3.1, '''image_size''': 6_0_0, '''dropout_rate''': 0.5, '''dw_padding''': [1_8], }, } def __lowerCamelCase ( __a :Optional[Any] ) -> Dict: """simple docstring""" A__ = EfficientNetConfig() A__ = CONFIG_MAP[model_name]["""hidden_dim"""] A__ = CONFIG_MAP[model_name]["""width_coef"""] A__ = CONFIG_MAP[model_name]["""depth_coef"""] A__ = CONFIG_MAP[model_name]["""image_size"""] A__ = CONFIG_MAP[model_name]["""dropout_rate"""] A__ = CONFIG_MAP[model_name]["""dw_padding"""] A__ = """huggingface/label-files""" A__ = """imagenet-1k-id2label.json""" A__ = 1_0_0_0 A__ = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="""dataset""" ) , """r""" ) ) A__ = {int(__lowerCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} return config def __lowerCamelCase ( ) -> Any: """simple docstring""" A__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" A__ = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im def __lowerCamelCase ( __a :List[str] ) -> Optional[Any]: """simple docstring""" A__ = CONFIG_MAP[model_name]["""image_size"""] A__ = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=__lowerCamelCase , ) return preprocessor def __lowerCamelCase ( __a :Tuple ) -> Union[str, Any]: """simple docstring""" A__ = [v.split("""_""" 
)[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] A__ = sorted(set(__lowerCamelCase ) ) A__ = len(__lowerCamelCase ) A__ = {b: str(__lowerCamelCase ) for b, i in zip(__lowerCamelCase , range(__lowerCamelCase ) )} A__ = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: A__ = block_name_mapping[b] rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') ) rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') ) rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') ) rename_keys.append( (F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') ) rename_keys.append( (F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') ) rename_keys.append( (F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') ) rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') ) rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') ) rename_keys.append( (F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') ) rename_keys.append( (F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') ) rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') ) rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') ) rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') ) rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') ) rename_keys.append( (F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') ) rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') ) rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') ) rename_keys.append( (F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') ) rename_keys.append( (F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) A__ = {} for item in rename_keys: if item[0] in original_param_names: A__ = """efficientnet.""" + item[1] A__ = """classifier.weight""" A__ = """classifier.bias""" return key_mapping def __lowerCamelCase ( __a :List[Any] , __a :List[str] , __a :Optional[int] ) -> 
Optional[int]: """simple docstring""" for key, value in tf_params.items(): if "normalization" in key: continue A__ = key_mapping[key] if "_conv" in key and "kernel" in key: A__ = torch.from_numpy(__lowerCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: A__ = torch.from_numpy(__lowerCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: A__ = torch.from_numpy(np.transpose(__lowerCamelCase ) ) else: A__ = torch.from_numpy(__lowerCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(__lowerCamelCase ) @torch.no_grad() def __lowerCamelCase ( __a :Dict , __a :Union[str, Any] , __a :List[str] , __a :List[Any] ) -> Union[str, Any]: """simple docstring""" A__ = model_classes[model_name]( include_top=__lowerCamelCase , weights="""imagenet""" , input_tensor=__lowerCamelCase , input_shape=__lowerCamelCase , pooling=__lowerCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) A__ = original_model.trainable_variables A__ = original_model.non_trainable_variables A__ = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: A__ = param.numpy() A__ = list(tf_params.keys() ) # Load HuggingFace model A__ = get_efficientnet_config(__lowerCamelCase ) A__ = EfficientNetForImageClassification(__lowerCamelCase ).eval() A__ = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) A__ = rename_keys(__lowerCamelCase ) replace_params(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Initialize preprocessor and preprocess input image A__ = convert_image_processor(__lowerCamelCase ) A__ = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): A__ = hf_model(**__lowerCamelCase ) A__ = outputs.logits.detach().numpy() # Original model inference A__ = False A__ = CONFIG_MAP[model_name]["""image_size"""] A__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) A__ = image.img_to_array(__lowerCamelCase ) A__ = np.expand_dims(__lowerCamelCase , axis=0 ) A__ = original_model.predict(__lowerCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(__lowerCamelCase , __lowerCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(__lowerCamelCase ): os.mkdir(__lowerCamelCase ) # Save converted model and image processor hf_model.save_pretrained(__lowerCamelCase ) preprocessor.save_pretrained(__lowerCamelCase ) if push_to_hub: # Push model and image processor to hub print(F'Pushing converted {model_name} to the hub...' 
) A__ = F'efficientnet-{model_name}' preprocessor.push_to_hub(__lowerCamelCase ) hf_model.push_to_hub(__lowerCamelCase ) if __name__ == "__main__": A : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''b0''', type=str, help='''Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''hf_model''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--save_model''', action='''store_true''', help='''Save model to local''') parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''') A : List[str] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
274
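The heart of replace_params is the kernel-layout change between TensorFlow and PyTorch; in isolation:

import numpy as np
import torch

tf_kernel = np.random.randn(3, 3, 64, 128)   # TF conv kernel: (H, W, in, out)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
print(pt_weight.shape)                       # torch.Size([128, 64, 3, 3]) -- (out, in, H, W)

dw_kernel = np.random.randn(3, 3, 64, 1)     # TF depthwise kernel: (H, W, in, multiplier)
dw_weight = torch.from_numpy(dw_kernel).permute(2, 3, 0, 1)
print(dw_weight.shape)                       # torch.Size([64, 1, 3, 3])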
import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCAmelCase_ =["input_features", "is_longer"] def __init__( self , _A=64 , _A=48000 , _A=480 , _A=10 , _A=1024 , _A=0.0 , _A=False , _A = 0 , _A = 14000 , _A = None , _A = "fusion" , _A = "repeatpad" , **_A , ) -> Dict: super().__init__( feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , ) SCREAMING_SNAKE_CASE_ = top_db SCREAMING_SNAKE_CASE_ = truncation SCREAMING_SNAKE_CASE_ = padding SCREAMING_SNAKE_CASE_ = fft_window_size SCREAMING_SNAKE_CASE_ = (fft_window_size >> 1) + 1 SCREAMING_SNAKE_CASE_ = hop_length SCREAMING_SNAKE_CASE_ = max_length_s SCREAMING_SNAKE_CASE_ = max_length_s * sampling_rate SCREAMING_SNAKE_CASE_ = sampling_rate SCREAMING_SNAKE_CASE_ = frequency_min SCREAMING_SNAKE_CASE_ = frequency_max SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , ) SCREAMING_SNAKE_CASE_ = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , ) def _UpperCamelCase ( self ) -> Dict[str, Any]: SCREAMING_SNAKE_CASE_ = copy.deepcopy(self.__dict__ ) SCREAMING_SNAKE_CASE_ = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def _UpperCamelCase ( self , _A , _A = None ) -> np.ndarray: SCREAMING_SNAKE_CASE_ = spectrogram( _A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , ) return log_mel_spectrogram.T def _UpperCamelCase ( self , _A , _A , _A ) -> Optional[Any]: SCREAMING_SNAKE_CASE_ = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 ) if len(ranges[1] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] if len(ranges[2] ) == 0: # if the audio is too short, we just use the first chunk SCREAMING_SNAKE_CASE_ = [0] # randomly choose index for each part SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[0] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[1] ) SCREAMING_SNAKE_CASE_ = np.random.choice(ranges[2] ) SCREAMING_SNAKE_CASE_ = mel[idx_front : idx_front + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_middle : idx_middle + chunk_frames, :] SCREAMING_SNAKE_CASE_ = mel[idx_back : idx_back + chunk_frames, :] SCREAMING_SNAKE_CASE_ = torch.tensor(mel[None, None, :] ) SCREAMING_SNAKE_CASE_ = torch.nn.functional.interpolate( _A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A ) SCREAMING_SNAKE_CASE_ = mel_shrink[0][0].numpy() SCREAMING_SNAKE_CASE_ = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 ) return mel_fusion def _UpperCamelCase ( self , _A , _A , _A , _A ) -> np.array: if waveform.shape[0] > max_length: if truncation == "rand_trunc": SCREAMING_SNAKE_CASE_ = True # random crop to max_length (for 
compatibility) -> this should be handled by self.pad SCREAMING_SNAKE_CASE_ = len(_A ) - max_length SCREAMING_SNAKE_CASE_ = np.random.randint(0 , overflow + 1 ) SCREAMING_SNAKE_CASE_ = waveform[idx : idx + max_length] SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :] elif truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters ) SCREAMING_SNAKE_CASE_ = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed SCREAMING_SNAKE_CASE_ = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. SCREAMING_SNAKE_CASE_ = np.stack([mel, mel, mel, mel] , axis=0 ) SCREAMING_SNAKE_CASE_ = False else: SCREAMING_SNAKE_CASE_ = self._random_mel_fusion(_A , _A , _A ) SCREAMING_SNAKE_CASE_ = True else: raise NotImplementedError(F'''data_truncating {truncation} not implemented''' ) else: SCREAMING_SNAKE_CASE_ = False # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length] if padding == "repeatpad": SCREAMING_SNAKE_CASE_ = int(max_length / len(_A ) ) SCREAMING_SNAKE_CASE_ = np.stack(np.tile(_A , _A ) ) SCREAMING_SNAKE_CASE_ = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 ) if truncation == "fusion": SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters ) SCREAMING_SNAKE_CASE_ = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 ) else: SCREAMING_SNAKE_CASE_ = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :] return input_mel, longer def __call__( self , _A , _A = None , _A = None , _A = None , _A = None , _A = None , **_A , ) -> BatchFeature: SCREAMING_SNAKE_CASE_ = truncation if truncation is not None else self.truncation SCREAMING_SNAKE_CASE_ = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. 
''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) SCREAMING_SNAKE_CASE_ = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) SCREAMING_SNAKE_CASE_ = is_batched_numpy or ( isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech] elif not is_batched and not isinstance(_A , np.ndarray ): SCREAMING_SNAKE_CASE_ = np.asarray(_A , dtype=np.floataa ) elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE_ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE_ = [np.asarray(_A )] # convert to mel spectrogram, truncate and pad if needed. SCREAMING_SNAKE_CASE_ = [ self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A ) for waveform in raw_speech ] SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for mel, longer in padded_inputs: input_mel.append(_A ) is_longer.append(_A ) if truncation == "fusion" and sum(_A ) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer SCREAMING_SNAKE_CASE_ = np.random.randint(0 , len(_A ) ) SCREAMING_SNAKE_CASE_ = True if isinstance(input_mel[0] , _A ): SCREAMING_SNAKE_CASE_ = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel] # is_longer is a list of bool SCREAMING_SNAKE_CASE_ = [[longer] for longer in is_longer] SCREAMING_SNAKE_CASE_ = {'''input_features''': input_mel, '''is_longer''': is_longer} SCREAMING_SNAKE_CASE_ = BatchFeature(_A ) if return_tensors is not None: SCREAMING_SNAKE_CASE_ = input_features.convert_to_tensors(_A ) return input_features
299
0
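This matches transformers' ClapFeatureExtractor; assuming that class and its defaults (48 kHz, 10 s max length, "fusion" truncation), a synthetic 12-second waveform should exercise the longer/fusion path:

import numpy as np

from transformers import ClapFeatureExtractor

fe = ClapFeatureExtractor()
waveform = np.random.randn(48_000 * 12).astype(np.float32)  # 12 s > 10 s max
inputs = fe(waveform, sampling_rate=48_000, return_tensors="pt")
print(inputs["input_features"].shape)  # roughly (1, 4, 1001, 64): four fused mel chunks
print(inputs["is_longer"])             # tensor([[True]])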
import torch from torch import nn class __A ( nn.Module ): def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=1 , UpperCAmelCase_=False ): super().__init__() lowerCamelCase =n_token lowerCamelCase =d_embed lowerCamelCase =d_proj lowerCamelCase =cutoffs + [n_token] lowerCamelCase =[0] + self.cutoffs lowerCamelCase =div_val lowerCamelCase =self.cutoffs[0] lowerCamelCase =len(self.cutoffs ) - 1 lowerCamelCase =self.shortlist_size + self.n_clusters if self.n_clusters > 0: lowerCamelCase =nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) lowerCamelCase =nn.Parameter(torch.zeros(self.n_clusters ) ) lowerCamelCase =nn.ModuleList() lowerCamelCase =nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) else: self.out_projs.append(UpperCAmelCase_ ) self.out_layers.append(nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ ) ) else: for i in range(len(self.cutoffs ) ): lowerCamelCase , lowerCamelCase =self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCamelCase =d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) self.out_layers.append(nn.Linear(UpperCAmelCase_ , r_idx - l_idx ) ) lowerCamelCase =keep_order def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): if proj is None: lowerCamelCase =nn.functional.linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: lowerCamelCase =nn.functional.linear(UpperCAmelCase_ , proj.t().contiguous() ) lowerCamelCase =nn.functional.linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def _snake_case ( self , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=False ): if labels is not None: # Shift so that tokens < n predict n lowerCamelCase =hidden[..., :-1, :].contiguous() lowerCamelCase =labels[..., 1:].contiguous() lowerCamelCase =hidden.view(-1 , hidden.size(-1 ) ) lowerCamelCase =labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" ) else: lowerCamelCase =hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: lowerCamelCase =self._compute_logit(UpperCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: lowerCamelCase =labels != -100 lowerCamelCase =torch.zeros_like(UpperCAmelCase_ , dtype=hidden.dtype , device=hidden.device ) lowerCamelCase =( -nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: lowerCamelCase =nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 ) else: # construct weights and biases lowerCamelCase , lowerCamelCase =[], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowerCamelCase , lowerCamelCase =self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCamelCase =self.out_layers[0].weight[l_idx:r_idx] lowerCamelCase =self.out_layers[0].bias[l_idx:r_idx] else: lowerCamelCase =self.out_layers[i].weight lowerCamelCase =self.out_layers[i].bias if i == 0: lowerCamelCase =torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowerCamelCase =torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(UpperCAmelCase_ ) 
biases.append(UpperCAmelCase_ ) lowerCamelCase , lowerCamelCase , lowerCamelCase =weights[0], biases[0], self.out_projs[0] lowerCamelCase =self._compute_logit(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase =nn.functional.log_softmax(UpperCAmelCase_ , dim=1 ) if labels is None: lowerCamelCase =hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: lowerCamelCase =torch.zeros_like(UpperCAmelCase_ , dtype=hidden.dtype , device=hidden.device ) lowerCamelCase =0 lowerCamelCase =[0] + self.cutoffs for i in range(len(UpperCAmelCase_ ) - 1 ): lowerCamelCase , lowerCamelCase =cutoff_values[i], cutoff_values[i + 1] if labels is not None: lowerCamelCase =(labels >= l_idx) & (labels < r_idx) lowerCamelCase =mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue lowerCamelCase =labels.index_select(0 , UpperCAmelCase_ ) - l_idx lowerCamelCase =head_logprob.index_select(0 , UpperCAmelCase_ ) lowerCamelCase =hidden.index_select(0 , UpperCAmelCase_ ) else: lowerCamelCase =hidden if i == 0: if labels is not None: lowerCamelCase =head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: lowerCamelCase =head_logprob[:, : self.cutoffs[0]] else: lowerCamelCase , lowerCamelCase , lowerCamelCase =weights[i], biases[i], self.out_projs[i] lowerCamelCase =self._compute_logit(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase =nn.functional.log_softmax(UpperCAmelCase_ , dim=1 ) lowerCamelCase =self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: lowerCamelCase =head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: lowerCamelCase =head_logprob[:, cluster_prob_idx, None] + tail_logprob_i lowerCamelCase =logprob_i if labels is not None: if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order: out.index_copy_(0 , UpperCAmelCase_ , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def _snake_case ( self , UpperCAmelCase_ ): if self.n_clusters == 0: lowerCamelCase =self._compute_logit(UpperCAmelCase_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 ) else: # construct weights and biases lowerCamelCase , lowerCamelCase =[], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowerCamelCase , lowerCamelCase =self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCamelCase =self.out_layers[0].weight[l_idx:r_idx] lowerCamelCase =self.out_layers[0].bias[l_idx:r_idx] else: lowerCamelCase =self.out_layers[i].weight lowerCamelCase =self.out_layers[i].bias if i == 0: lowerCamelCase =torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowerCamelCase =torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(UpperCAmelCase_ ) biases.append(UpperCAmelCase_ ) lowerCamelCase , lowerCamelCase , lowerCamelCase =weights[0], biases[0], self.out_projs[0] lowerCamelCase =self._compute_logit(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase =hidden.new_empty((head_logit.size(0 ), self.n_token) ) lowerCamelCase =nn.functional.log_softmax(UpperCAmelCase_ , dim=1 ) lowerCamelCase =[0] + self.cutoffs for i in range(len(UpperCAmelCase_ ) - 1 ): lowerCamelCase , lowerCamelCase =cutoff_values[i], cutoff_values[i + 1] if i == 0: lowerCamelCase =head_logprob[:, : self.cutoffs[0]] else: lowerCamelCase , lowerCamelCase , lowerCamelCase =weights[i], biases[i], 
self.out_projs[i] lowerCamelCase =self._compute_logit(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase =nn.functional.log_softmax(UpperCAmelCase_ , dim=1 ) lowerCamelCase =head_logprob[:, -i] + tail_logprob_i lowerCamelCase =logprob_i return out
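The class above is an identifier-mangled adaptive log-softmax head in the style of Transfo-XL's ProjectedAdaptiveLogSoftmax: frequent tokens live in a head shortlist, rare tokens in tail clusters, and a tail token's log-probability is composed as log p(cluster) + log p(token | cluster). A minimal, self-contained sketch of that decomposition (shapes and names here are illustrative, not the mangled class's API):

import torch

# Toy setup: 4 shortlist tokens plus 1 cluster entry in the head,
# and 6 tokens inside that tail cluster.
head_logits = torch.randn(1, 5)   # [shortlist tokens..., cluster logit]
tail_logits = torch.randn(1, 6)   # logits over tokens within the cluster
head_logprob = torch.log_softmax(head_logits, dim=-1)
tail_logprob = torch.log_softmax(tail_logits, dim=-1)

# log p(tail token t) = log p(cluster) + log p(t | cluster)
logprob_tail_token = head_logprob[:, -1] + tail_logprob[:, 2]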
262
import argparse import os import re import packaging.version UpperCAmelCase__ : List[Any] ='''examples/''' UpperCAmelCase__ : List[str] ={ '''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''), '''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } UpperCAmelCase__ : List[Any] ={ '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } UpperCAmelCase__ : Union[str, Any] ='''README.md''' def _lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> str: with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase =f.read() lowerCamelCase , lowerCamelCase =REPLACE_PATTERNS[pattern] lowerCamelCase =replace.replace("""VERSION""" , _UpperCAmelCase ) lowerCamelCase =re_pattern.sub(_UpperCAmelCase , _UpperCAmelCase ) with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(_UpperCAmelCase ) def _lowercase ( _UpperCAmelCase ) -> int: for folder, directories, fnames in os.walk(_UpperCAmelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , pattern="""examples""" ) def _lowercase ( _UpperCAmelCase , _UpperCAmelCase=False ) -> Any: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if not patch: update_version_in_examples(_UpperCAmelCase ) def _lowercase ( ) -> Dict: lowerCamelCase ="""🤗 Transformers currently provides the following architectures""" lowerCamelCase ="""1. Want to contribute a new model?""" with open(_UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: lowerCamelCase =f.readlines() # Find the start of the list. lowerCamelCase =0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowerCamelCase =start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): lowerCamelCase =lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(_UpperCAmelCase ) def _lowercase ( ) -> Optional[int]: with open(REPLACE_FILES["""init"""] , """r""" ) as f: lowerCamelCase =f.read() lowerCamelCase =REPLACE_PATTERNS["""init"""][0].search(_UpperCAmelCase ).groups()[0] return packaging.version.parse(_UpperCAmelCase ) def _lowercase ( _UpperCAmelCase=False ) -> List[str]: lowerCamelCase =get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: lowerCamelCase =default_version.base_version elif patch: lowerCamelCase =F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: lowerCamelCase =F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. lowerCamelCase =input(F"""Which version are you releasing? [{default_version}]""" ) if len(_UpperCAmelCase ) == 0: lowerCamelCase =default_version print(F"""Updating version to {version}.""" ) global_version_update(_UpperCAmelCase , patch=_UpperCAmelCase ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def _lowercase ( ) -> str: lowerCamelCase =get_version() lowerCamelCase =F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" lowerCamelCase =current_version.base_version # Check with the user we got that right. lowerCamelCase =input(F"""Which version are we developing now? [{dev_version}]""" ) if len(_UpperCAmelCase ) == 0: lowerCamelCase =dev_version print(F"""Updating version to {version}.""" ) global_version_update(_UpperCAmelCase ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCAmelCase__ : Optional[Any] =argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') UpperCAmelCase__ : str =parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
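For reference, the release script's "init" pattern rewrites the __version__ line in place. A quick sanity check of that substitution on a literal string (the version numbers are made up for illustration):

import re

pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
source = '__version__ = "4.26.0.dev0"'
print(pattern.sub('__version__ = "4.26.0"', source))  # __version__ = "4.26.0"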
262
1
import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings UpperCAmelCase : List[Any] =logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class _lowercase (a_ ): '''simple docstring''' lowercase__ = field(default=a_ , metadata={"""help""": """Whether to use SortishSampler or not."""} ) lowercase__ = field( default=a_ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} ) lowercase__ = field( default=a_ , metadata={ """help""": ( """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `max_length` value of the model configuration.""" ) } , ) lowercase__ = field( default=a_ , metadata={ """help""": ( """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `num_beams` value of the model configuration.""" ) } , ) lowercase__ = field( default=a_ , metadata={ """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.""" } , ) def _lowerCamelCase ( self ): '''simple docstring''' UpperCamelCase_ = super().to_dict() for k, v in d.items(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase_ = v.to_dict() return d
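The dataclass above appears to mirror transformers' Seq2SeqTrainingArguments (the class and field names are mangled in this dump). Under that assumption, a typical instantiation would look like:

# Hypothetical usage, assuming the mangled class corresponds to
# transformers.Seq2SeqTrainingArguments:
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)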
128
def harmonic_series(n_term: str) -> list:
    # Build the first n terms of the harmonic series as strings: ["1", "1/2", "1/3", ...].
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
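A quick sanity check of the function as rewritten above:

assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []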
128
1
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCamelCase (unittest.TestCase ): '''simple docstring''' _snake_case : List[str] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : List[str] = hf_hub_download( repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) UpperCAmelCase_ : str = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2 ) UpperCAmelCase_ : List[str] = [ example_video_filepath, 'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4', ] return video_classifier, examples def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase ) -> Dict: for example in examples: UpperCAmelCase_ : str = video_classifier(_UpperCamelCase ) self.assertEqual( _UpperCamelCase , [ {'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )}, {'score': ANY(_UpperCamelCase ), 'label': ANY(_UpperCamelCase )}, ] , ) @require_torch def __UpperCAmelCase ( self ) -> List[Any]: UpperCAmelCase_ : str = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification' UpperCAmelCase_ : Optional[Any] = VideoMAEFeatureExtractor( size={'shortest_edge': 1_0} , crop_size={'height': 1_0, 'width': 1_0} ) UpperCAmelCase_ : str = pipeline( 'video-classification' , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4 ) UpperCAmelCase_ : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' ) UpperCAmelCase_ : List[str] = video_classifier(_UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(_UpperCamelCase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , ) UpperCAmelCase_ : Tuple = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_UpperCamelCase , decimals=4 ) , [ [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}], ] , ) @require_tf def __UpperCAmelCase ( self ) -> Dict: pass
145
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    # Project Euler 174: count tile totals t <= t_limit that form a hollow
    # square lamina in between 1 and 10 distinct ways.
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Keep the hole width the same parity as the outer width.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= 10)


if __name__ == "__main__":
    print(f"{solution() = }")
145
1
def binary_count_setbits(a: int) -> int:
    # Count the set bits (1s) in the binary representation of a positive integer.
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
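Spot checks for the rewritten function (values verifiable by hand):

assert binary_count_setbits(25) == 3  # 0b11001
assert binary_count_setbits(37) == 3  # 0b100101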
59
def solution():
    # Project Euler 19: count the Sundays that fell on the first of the month
    # during the twentieth century (1 Jan 1901 to 31 Dec 2000).
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
164
0
"""simple docstring""" import itertools import math def a__ ( snake_case__ ) -> bool: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def a__ ( ) -> Tuple: lowerCamelCase = 2 while True: if is_prime(snake_case__ ): yield num num += 1 def a__ ( snake_case__ = 1_00_01 ) -> int: return next(itertools.islice(prime_generator() , nth - 1 , snake_case__ ) ) if __name__ == "__main__": print(F"""{solution() = }""")
168
"""simple docstring""" import math import random def a__ ( snake_case__ , snake_case__ = False ) -> float: if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value lowerCAmelCase : Dict = 0.0_2 def a__ ( snake_case__ , snake_case__ ) -> float: lowerCamelCase = float(2 * (random.randint(1 , 1_00 )) - 1 ) for _ in range(snake_case__ ): # Forward propagation lowerCamelCase = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? lowerCamelCase = (expected / 1_00) - layer_a # Error delta lowerCamelCase = layer_1_error * sigmoid_function(snake_case__ , snake_case__ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 1_00 if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase : Any = int(input("""Expected value: """)) lowerCAmelCase : List[Any] = int(input("""Number of propagations: """)) print(forward_propagation(expected, number_propagations))
168
1
'''simple docstring''' import os import re import shutil import sys import tempfile import unittest import black _UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. _UpperCamelCase = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n' class lowerCamelCase_ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Any ) -> int: __lowerCamelCase : Any = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) ) __lowerCamelCase : Any = self.diffusers_dir shutil.copy( os.path.join(_SCREAMING_SNAKE_CASE , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , ) def _lowercase ( self : List[Any] ) -> Dict: __lowerCamelCase : List[str] = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def _lowercase ( self : List[Any] , _a : str , _a : Union[str, Any] , _a : Union[str, Any] , _a : Dict=None ) -> Union[str, Any]: __lowerCamelCase : str = comment + f'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: __lowerCamelCase : Dict = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result __lowerCamelCase : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) __lowerCamelCase : str = black.format_str(_SCREAMING_SNAKE_CASE , mode=_SCREAMING_SNAKE_CASE ) __lowerCamelCase : Tuple = os.path.join(self.diffusers_dir , 'new_code.py' ) with open(_SCREAMING_SNAKE_CASE , 'w' , newline='\n' ) as f: f.write(_SCREAMING_SNAKE_CASE ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_SCREAMING_SNAKE_CASE ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_SCREAMING_SNAKE_CASE ) with open(_SCREAMING_SNAKE_CASE , 'r' ) as f: self.assertTrue(f.read() , _SCREAMING_SNAKE_CASE ) def _lowercase ( self : str ) -> List[str]: __lowerCamelCase : Optional[int] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def _lowercase ( self : Dict ) -> List[str]: self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _SCREAMING_SNAKE_CASE , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _SCREAMING_SNAKE_CASE ) , ) 
# Copy consistency with a really long name __lowerCamelCase : str = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f'# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}' , f'{long_class_name}SchedulerOutput' , re.sub('Bert' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _SCREAMING_SNAKE_CASE , overwrite_result=re.sub('DDPM' , 'Test' , _SCREAMING_SNAKE_CASE ) , )
208
'''simple docstring''' import datasets from .evaluate import evaluate SCREAMING_SNAKE_CASE__ = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n' SCREAMING_SNAKE_CASE__ = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n' SCREAMING_SNAKE_CASE__ = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a_ ( datasets.Metric ): def A__ ( self ) -> Tuple: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": { """id""": datasets.Value("""string""" ), """prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ), }, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , ) def A__ ( self 
, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" UpperCamelCase = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions} UpperCamelCase = [ { """paragraphs""": [ { """qas""": [ { """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]], """id""": ref["""id"""], } for ref in references ] } ] } ] UpperCamelCase = evaluate(dataset=_SCREAMING_SNAKE_CASE , predictions=_SCREAMING_SNAKE_CASE ) return score
321
0
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Taylor-series approximation of sin(x): x - x^3/3! + x^5/5! - ...
    # Wrap the angle into [0, 360) first.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
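A few spot checks against well-known sine values (results are rounded to 10 decimal places by default):

assert sin(0) == 0.0
assert abs(sin(30) - 0.5) < 1e-9
assert abs(sin(90) - 1.0) < 1e-9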
361
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    # Project Euler 86: find the least cuboid size M such that the number of
    # cuboids with an integer shortest-path solution exceeds the limit.
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
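If memory serves, the published answer to Project Euler 86 is 1818; treat the following as a slow, from-memory spot check rather than a guaranteed result:

assert solution() == 1_818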
171
0
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class lowerCamelCase (unittest.TestCase ): '''simple docstring''' def __init__( self , _UpperCamelCase ) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = parent def __UpperCAmelCase ( self ) -> str: return {} def lowercase__ ( ): '''simple docstring''' UpperCAmelCase_ : Any = '''<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR="FFFFFF"> <HR> <a href="http://google.com">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style="color:#0000FF"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>''' UpperCAmelCase_ : Optional[int] = ''' <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> ''' return [html_string_a, html_string_a] @require_bsa class lowerCamelCase (UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' _snake_case : Dict = MarkupLMFeatureExtractor if is_bsa_available() else None def __UpperCAmelCase ( self ) -> int: UpperCAmelCase_ : List[str] = MarkupLMFeatureExtractionTester(self ) @property def __UpperCAmelCase ( self ) -> Tuple: return self.feature_extract_tester.prepare_feat_extract_dict() def __UpperCAmelCase ( self ) -> List[str]: # Initialize feature_extractor UpperCAmelCase_ : Tuple = self.feature_extraction_class() # Test not batched input UpperCAmelCase_ : Optional[int] = get_html_strings()[0] UpperCAmelCase_ : List[Any] = feature_extractor(__lowercase ) # fmt: off UpperCAmelCase_ : List[str] = [['''sample document''', '''Goog''', '''This is one header''', '''This is a another Header''', '''Travel from''', '''SFO to JFK''', '''on May 2, 2015 at 2:00 pm. For details go to confirm.com''', '''Traveler''', '''name''', '''is''', '''John Doe''']] UpperCAmelCase_ : str = [['''/html/head/title''', '''/html/body/a''', '''/html/body/h1''', '''/html/body/h2''', '''/html/body/p''', '''/html/body/p/p/b[1]''', '''/html/body/p/p/b[2]/i''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/b''', '''/html/body/p/p/div/h3''', '''/html/body/p/p/div/h3/p''']] # fmt: on self.assertEqual(encoding.nodes , __lowercase ) self.assertEqual(encoding.xpaths , __lowercase ) # Test batched UpperCAmelCase_ : str = get_html_strings() UpperCAmelCase_ : int = feature_extractor(__lowercase ) # fmt: off UpperCAmelCase_ : Optional[Any] = expected_nodes + [['''My First Heading''', '''My first paragraph.''']] UpperCAmelCase_ : List[str] = expected_xpaths + [['''/html/body/h1''', '''/html/body/p''']] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , __lowercase ) self.assertEqual(encoding.xpaths , __lowercase )
29
from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase : Optional[Any] =logging.get_logger(__name__) _UpperCAmelCase : str ={ """facebook/vit-mae-base""": """https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json""", # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class snake_case__( UpperCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = """vit_mae""" def __init__( self , __lowercase=7_6_8 , __lowercase=1_2 , __lowercase=1_2 , __lowercase=3_0_7_2 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=2_2_4 , __lowercase=1_6 , __lowercase=3 , __lowercase=True , __lowercase=1_6 , __lowercase=5_1_2 , __lowercase=8 , __lowercase=2_0_4_8 , __lowercase=0.75 , __lowercase=False , **__lowercase , ) -> str: super().__init__(**__lowercase ) lowerCAmelCase_ : Dict = hidden_size lowerCAmelCase_ : Any = num_hidden_layers lowerCAmelCase_ : Any = num_attention_heads lowerCAmelCase_ : int = intermediate_size lowerCAmelCase_ : Dict = hidden_act lowerCAmelCase_ : int = hidden_dropout_prob lowerCAmelCase_ : str = attention_probs_dropout_prob lowerCAmelCase_ : List[str] = initializer_range lowerCAmelCase_ : Dict = layer_norm_eps lowerCAmelCase_ : Union[str, Any] = image_size lowerCAmelCase_ : Optional[int] = patch_size lowerCAmelCase_ : Tuple = num_channels lowerCAmelCase_ : List[str] = qkv_bias lowerCAmelCase_ : List[Any] = decoder_num_attention_heads lowerCAmelCase_ : int = decoder_hidden_size lowerCAmelCase_ : Optional[int] = decoder_num_hidden_layers lowerCAmelCase_ : Tuple = decoder_intermediate_size lowerCAmelCase_ : Tuple = mask_ratio lowerCAmelCase_ : Any = norm_pix_loss
262
0
"""simple docstring""" def _lowerCamelCase( a , a , a , a ): global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: __a = mf_knapsack(i - 1 , a , a , a ) else: __a = max( mf_knapsack(i - 1 , a , a , a ) , mf_knapsack(i - 1 , a , a , j - wt[i - 1] ) + val[i - 1] , ) __a = val return f[i][j] def _lowerCamelCase( a , a , a , a ): __a = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: __a = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: __a = dp[i - 1][w_] return dp[n][w_], dp def _lowerCamelCase( a , a , a ): if not (isinstance(a , (list, tuple) ) and isinstance(a , (list, tuple) )): raise ValueError( "Both the weights and values vectors must be either lists or tuples" ) __a = len(a ) if num_items != len(a ): __a = ( "The number of weights must be the same as the number of values.\n" F"But got {num_items} weights and {len(a )} values" ) raise ValueError(a ) for i in range(a ): if not isinstance(wt[i] , a ): __a = ( "All weights must be integers but got weight of " F"type {type(wt[i] )} at index {i}" ) raise TypeError(a ) __a , __a = knapsack(a , a , a , a ) __a = set() _construct_solution(a , a , a , a , a ) return optimal_val, example_optional_set def _lowerCamelCase( a , a , a , a , a ): # for the current item i at a maximum weight j to be part of an optimal subset, # the optimal value at (i, j) must be greater than the optimal value at (i-1, j). # where i - 1 means considering only the previous items at the given maximum weight if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(a , a , i - 1 , a , a ) else: optimal_set.add(a ) _construct_solution(a , a , i - 1 , j - wt[i - 1] , a ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__:Tuple = [3, 2, 4, 4] SCREAMING_SNAKE_CASE__:List[str] = [4, 3, 2, 3] SCREAMING_SNAKE_CASE__:List[str] = 4 SCREAMING_SNAKE_CASE__:List[str] = 6 SCREAMING_SNAKE_CASE__:Optional[Any] = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__:Optional[int] = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__:Optional[Any] = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
268
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__:Tuple = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:List[Any] = {"""vocab_file""": """vocab.txt"""} SCREAMING_SNAKE_CASE__:Optional[int] = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } SCREAMING_SNAKE_CASE__:Tuple = { """openbmb/cpm-ant-10b""": 1024, } def _lowerCamelCase( a ): __a = collections.OrderedDict() with open(a , "r" , encoding="utf-8" ) as reader: __a = reader.readlines() for index, token in enumerate(a ): __a = token.rstrip("\n" ) __a = index return vocab class snake_case__ ( snake_case_ ): def __init__( self , lowerCamelCase , lowerCamelCase="<unk>" , lowerCamelCase=200 ): __a = vocab __a = unk_token __a = max_input_chars_per_word def a__ ( self , lowerCamelCase ): __a = list(lowerCamelCase ) if len(lowerCamelCase ) > self.max_input_chars_per_word: return [self.unk_token] __a = 0 __a = [] while start < len(lowerCamelCase ): __a = len(lowerCamelCase ) __a = None while start < end: __a = "".join(chars[start:end] ) if substr in self.vocab: __a = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowerCamelCase ) __a = end return sub_tokens class snake_case__ ( snake_case_ ): _snake_case : Optional[int] = VOCAB_FILES_NAMES _snake_case : Optional[int] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : int = ["""input_ids""", """attention_mask"""] _snake_case : int = False def __init__( self , lowerCamelCase , lowerCamelCase="<d>" , lowerCamelCase="</d>" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<pad>" , lowerCamelCase="<unk>" , lowerCamelCase="</n>" , lowerCamelCase="</_>" , lowerCamelCase="left" , **lowerCamelCase , ): requires_backends(self , ["jieba"] ) super().__init__( bod_token=lowerCamelCase , eod_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , unk_token=lowerCamelCase , line_token=lowerCamelCase , space_token=lowerCamelCase , padding_side=lowerCamelCase , **lowerCamelCase , ) __a = bod_token __a = eod_token __a = load_vocab(lowerCamelCase ) __a = self.encoder[space_token] __a = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] __a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) ) __a = {v: k for k, v in self.encoder.items()} __a = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def a__ ( self ): return self.encoder[self.bod_token] @property def a__ ( self ): return self.encoder[self.eod_token] @property def a__ ( self ): return self.encoder["\n"] @property def a__ ( self ): return len(self.encoder ) def a__ ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self , lowerCamelCase ): __a = [] for x in jieba.cut(lowerCamelCase , cut_all=lowerCamelCase ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase ) ) return output_tokens def a__ ( self , lowerCamelCase , **lowerCamelCase ): __a = [i for i in token_ids if i >= 0] __a = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return 
super()._decode(lowerCamelCase , **lowerCamelCase ) def a__ ( self , lowerCamelCase ): return token in self.encoder def a__ ( self , lowerCamelCase ): return "".join(lowerCamelCase ) def a__ ( self , lowerCamelCase ): return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) ) def a__ ( self , lowerCamelCase ): return self.decoder.get(lowerCamelCase , self.unk_token ) def a__ ( self , lowerCamelCase , lowerCamelCase = None ): if os.path.isdir(lowerCamelCase ): __a = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: __a = (filename_prefix + "-" if filename_prefix else "") + save_directory __a = 0 if " " in self.encoder: __a = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: __a = self.encoder["\n"] del self.encoder["\n"] __a = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase : x[1] ) ) with open(lowerCamelCase , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." " Please check that the vocabulary is not corrupted!" ) __a = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def a__ ( self , lowerCamelCase , lowerCamelCase = None ): if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def a__ ( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase , token_ids_a=lowerCamelCase , already_has_special_tokens=lowerCamelCase ) if token_ids_a is not None: return [1] + ([0] * len(lowerCamelCase )) + [1] + ([0] * len(lowerCamelCase )) return [1] + ([0] * len(lowerCamelCase ))
268
1
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def lowerCAmelCase__ ( ) -> List[Any]: """simple docstring""" snake_case = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_lowerCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_lowerCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_lowerCAmelCase , default=4_2 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_lowerCAmelCase , default=0 , help='cuda_id.' , ) snake_case = parser.parse_args() return args def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" if not len(_lowerCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' ) snake_case = imgs[0].size snake_case = Image.new('RGB' , size=(cols * w, rows * h) ) snake_case = grid.size for i, img in enumerate(_lowerCAmelCase ): grid.paste(_lowerCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : int="robotic cat with wings" , _UpperCamelCase : str=7.5 , _UpperCamelCase : Any=5_0 , _UpperCamelCase : Tuple=1 , _UpperCamelCase : Union[str, Any]=4_2 , ) -> Dict: """simple docstring""" snake_case = torch.Generator(pipeline.device ).manual_seed(_lowerCAmelCase ) snake_case = pipeline( _lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , ).images snake_case = int(math.sqrt(_lowerCAmelCase ) ) snake_case = image_grid(_lowerCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images SCREAMING_SNAKE_CASE__ = parse_args() # Load models and create wrapper for stable diffusion SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") SCREAMING_SNAKE_CASE__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") SCREAMING_SNAKE_CASE__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") SCREAMING_SNAKE_CASE__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") SCREAMING_SNAKE_CASE__ = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) SCREAMING_SNAKE_CASE__ = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")): SCREAMING_SNAKE_CASE__ = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, "unet", unet) else: SCREAMING_SNAKE_CASE__ = unet.to(torch.device("cuda", args.cuda_id)) SCREAMING_SNAKE_CASE__ = pipeline.to(unet.device) SCREAMING_SNAKE_CASE__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, 
seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split())))) SCREAMING_SNAKE_CASE__ = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
150
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", """FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, 
FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
0
def sum_of_divisors(input_num: int) -> int:
    # Sum the proper divisors of a positive integer (divisors up to input_num // 2).
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
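Spot checks for the proper-divisor sum (28 is a perfect number; primes have only 1 as a proper divisor):

assert sum_of_divisors(28) == 28  # 1 + 2 + 4 + 7 + 14
assert sum_of_divisors(7) == 1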
299
'''simple docstring''' import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''label_embs_concat''': '''label_embeddings_concat''', '''mask_emb''': '''masked_spec_embed''', '''spk_proj''': '''speaker_proj''', } UpperCamelCase__ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', '''label_embeddings_concat''', '''speaker_proj''', '''layer_norm_for_extract''', ] def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: for attribute in key.split('''.''' ): UpperCAmelCase__ : Optional[int] = getattr(lowerCAmelCase__ , lowerCAmelCase__ ) if weight_type is not None: UpperCAmelCase__ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape else: UpperCAmelCase__ : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase__ : int = value elif weight_type == "weight_g": UpperCAmelCase__ : Dict = value elif weight_type == "weight_v": UpperCAmelCase__ : List[str] = value elif weight_type == "bias": UpperCAmelCase__ : Tuple = value else: UpperCAmelCase__ : Tuple = value logger.info(F"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : Dict = fairseq_model.state_dict() UpperCAmelCase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase__ : Any = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase__ : str = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase__ : List[str] = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue UpperCAmelCase__ : Optional[int] = True if "*" in mapped_key: UpperCAmelCase__ : str = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2] UpperCAmelCase__ : Optional[int] = mapped_key.replace('''*''' , lowerCAmelCase__ ) if "weight_g" in name: UpperCAmelCase__ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase__ : Dict = '''weight_v''' elif "bias" in name: UpperCAmelCase__ : Optional[int] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase__ : Tuple = '''weight''' else: UpperCAmelCase__ : Optional[Any] = None set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) continue if not is_used: unused_weights.append(lowerCAmelCase__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]: UpperCAmelCase__ : Tuple = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase__ : Optional[Any] = name.split('''.''' ) UpperCAmelCase__ : Union[str, Any] = int(items[0] ) UpperCAmelCase__ : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase__ : str = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[int] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase__ : List[str] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, 
but""" F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase__ : Optional[Any] = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(lowerCAmelCase__ ) @torch.no_grad() def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True ) -> Any: if config_path is not None: UpperCAmelCase__ : Any = UniSpeechSatConfig.from_pretrained(lowerCAmelCase__ ) else: UpperCAmelCase__ : int = UniSpeechSatConfig() UpperCAmelCase__ : Tuple = '''''' if is_finetuned: UpperCAmelCase__ : Optional[int] = UniSpeechSatForCTC(lowerCAmelCase__ ) else: UpperCAmelCase__ : List[Any] = UniSpeechSatForPreTraining(lowerCAmelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) UpperCAmelCase__ : Union[str, Any] = model[0].eval() recursively_load_weights(lowerCAmelCase__ , lowerCAmelCase__ ) hf_wavavec.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) UpperCamelCase__ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
299
1
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class _lowerCamelCase ( a_ ): @require_torch def _lowerCAmelCase ( self : Dict ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " lowerCAmelCase__ : Any = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " lowerCAmelCase__ : List[Any] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache lowerCAmelCase__ : Union[str, Any] = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(lowerCAmelCase__ ) BertModel.from_pretrained(lowerCAmelCase__ ) BertTokenizer.from_pretrained(lowerCAmelCase__ ) pipeline(task="""fill-mask""" , model=lowerCAmelCase__ ) # baseline - just load from_pretrained with normal network lowerCAmelCase__ : List[Any] = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed lowerCAmelCase__ : Optional[int] = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ : List[Any] = "1" lowerCAmelCase__ : Dict = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def _lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : Dict = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n " lowerCAmelCase__ : Any = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n " lowerCAmelCase__ : Tuple = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n " # Force fetching the files so that we can use the cache lowerCAmelCase__ : Optional[int] = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(lowerCAmelCase__ ) BertModel.from_pretrained(lowerCAmelCase__ ) BertTokenizer.from_pretrained(lowerCAmelCase__ ) pipeline(task="""fill-mask""" , model=lowerCAmelCase__ ) # baseline - just load from_pretrained with normal network lowerCAmelCase__ : List[Any] = [sys.executable, "-c", "\n".join([load, run, mock] )] # should succeed lowerCAmelCase__ : List[str] = self.get_env() lowerCAmelCase__ : Optional[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def _lowerCAmelCase ( self : Dict ) -> Any: """simple docstring""" lowerCAmelCase__ : Tuple = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n " lowerCAmelCase__ : str = "\nmname = 
\"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n " lowerCAmelCase__ : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " # baseline - just load from_pretrained with normal network lowerCAmelCase__ : Any = [sys.executable, "-c", "\n".join([load, run] )] # should succeed lowerCAmelCase__ : List[str] = self.get_env() lowerCAmelCase__ : List[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # next emulate no network lowerCAmelCase__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ : str = "1" lowerCAmelCase__ : List[str] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def _lowerCAmelCase ( self : List[Any] ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = "\nfrom transformers import pipeline\n " lowerCAmelCase__ : Any = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n " lowerCAmelCase__ : Optional[int] = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n " lowerCAmelCase__ : Optional[int] = self.get_env() lowerCAmelCase__ : Union[str, Any] = "1" lowerCAmelCase__ : List[str] = [sys.executable, "-c", "\n".join([load, mock, run] )] lowerCAmelCase__ : Optional[Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , ) @require_torch def _lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" lowerCAmelCase__ : Tuple = "\nfrom transformers import AutoModel\n " lowerCAmelCase__ : Dict = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n " # baseline - just load from_pretrained with normal network lowerCAmelCase__ : List[str] = [sys.executable, "-c", "\n".join([load, run] )] # should succeed lowerCAmelCase__ : Optional[Any] = self.get_env() lowerCAmelCase__ : Any = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowerCAmelCase__ : int = "1" lowerCAmelCase__ : Union[str, Any] = subprocess.run(lowerCAmelCase__ , env=lowerCAmelCase__ , check=lowerCAmelCase__ , capture_output=lowerCAmelCase__ ) self.assertEqual(result.returncode , 0 
, result.stderr ) self.assertIn("""success""" , result.stdout.decode() )
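The offline-mode tests above all rely on the same trick: monkeypatching socket.socket so that any attempt to reach the network fails loudly. A minimal standalone sketch of that trick (the URL and error handling here are illustrative, not taken from the test suite):

import socket
import urllib.request


def offline_socket(*args, **kwargs):
    # Every library that eventually constructs a socket (requests, urllib3,
    # huggingface_hub, ...) will now raise instead of touching the network.
    raise ValueError("Offline mode is enabled")


socket.socket = offline_socket

try:
    urllib.request.urlopen("https://huggingface.co")
except Exception as exc:
    print(f"network blocked as expected: {exc}")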
242
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json', # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class A__ ( UpperCamelCase ): """simple docstring""" UpperCamelCase_ : Dict = '''vit_msn''' def __init__( self : Optional[int] , lowerCAmelCase__ : str=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : int=1_2 , lowerCAmelCase__ : Optional[Any]=3_0_7_2 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : int=1e-06 , lowerCAmelCase__ : Union[str, Any]=2_2_4 , lowerCAmelCase__ : Optional[int]=1_6 , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : str=True , **lowerCAmelCase__ : Optional[Any] , ) -> int: """simple docstring""" super().__init__(**lowerCAmelCase__ ) _UpperCAmelCase : Any = hidden_size _UpperCAmelCase : str = num_hidden_layers _UpperCAmelCase : int = num_attention_heads _UpperCAmelCase : Any = intermediate_size _UpperCAmelCase : Any = hidden_act _UpperCAmelCase : str = hidden_dropout_prob _UpperCAmelCase : Tuple = attention_probs_dropout_prob _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : Tuple = layer_norm_eps _UpperCAmelCase : int = image_size _UpperCAmelCase : Tuple = patch_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Optional[int] = qkv_bias
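For reference, a short sketch of instantiating the configuration defined above and building a randomly initialised model from it (assumes a transformers release that ships ViT-MSN):

from transformers import ViTMSNConfig, ViTMSNModel

# Matches the defaults above: 768-dim hidden states, 12 layers, 12 heads, 224px inputs.
config = ViTMSNConfig(image_size=224, patch_size=16, num_channels=3)
model = ViTMSNModel(config)  # random weights; use from_pretrained() for trained ones
print(config.hidden_size, config.num_hidden_layers)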
145
0
def is_pentagonal(n: int) -> bool:
    """Return True if n is pentagonal, i.e. (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Find a pair of pentagonal numbers whose sum and difference are both
    pentagonal, and return the difference; -1 if no pair exists below the limit."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
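A quick worked check of the inverse formula, assuming the fixed definitions above: P(4) = 4·(3·4 − 1)/2 = 22, and (1 + sqrt(1 + 24·22))/6 = (1 + 23)/6 = 4, an integer, so 22 is pentagonal.

assert is_pentagonal(22)       # P(4)
assert not is_pentagonal(23)   # 23 is not a pentagonal number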
139
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE ( a_ , a_ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Tuple =AutoencoderKL lowerCamelCase : Tuple ="sample" lowerCamelCase : Dict =1e-2 @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: """simple docstring""" __lowerCAmelCase : str = 4 __lowerCAmelCase : Dict = 3 __lowerCAmelCase : Optional[Any] = (32, 32) __lowerCAmelCase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase ) return {"sample": image} @property def SCREAMING_SNAKE_CASE ( self : Any ) -> int: """simple docstring""" return (3, 32, 32) @property def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: """simple docstring""" return (3, 32, 32) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: """simple docstring""" __lowerCAmelCase : List[Any] = { """block_out_channels""": [32, 64], """in_channels""": 3, """out_channels""": 3, """down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], """up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], """latent_channels""": 4, } __lowerCAmelCase : Optional[int] = self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE ( self : int ) -> str: """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" pass @unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: """simple docstring""" __lowerCAmelCase ,__lowerCAmelCase : str = self.prepare_init_args_and_inputs_for_common() __lowerCAmelCase : Dict = self.model_class(**lowerCAmelCase ) model.to(lowerCAmelCase ) assert not model.is_gradient_checkpointing and model.training __lowerCAmelCase : str = model(**lowerCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __lowerCAmelCase : Any = torch.randn_like(lowerCAmelCase ) __lowerCAmelCase : str = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __lowerCAmelCase : List[str] = self.model_class(**lowerCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(lowerCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __lowerCAmelCase : Any = model_a(**lowerCAmelCase ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __lowerCAmelCase : Dict = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) __lowerCAmelCase : int = dict(model.named_parameters() ) __lowerCAmelCase : Union[str, Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: """simple docstring""" __lowerCAmelCase ,__lowerCAmelCase : List[Any] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=lowerCAmelCase ) self.assertIsNotNone(lowerCAmelCase ) self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 ) model.to(lowerCAmelCase ) __lowerCAmelCase : int = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def SCREAMING_SNAKE_CASE ( self : Any ) -> Any: """simple docstring""" __lowerCAmelCase : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ) __lowerCAmelCase : Optional[Any] = model.to(lowerCAmelCase ) model.eval() if torch_device == "mps": __lowerCAmelCase : List[Any] = torch.manual_seed(0 ) else: __lowerCAmelCase : Any = torch.Generator(device=lowerCAmelCase ).manual_seed(0 ) __lowerCAmelCase : Optional[int] = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __lowerCAmelCase : Optional[int] = image.to(lowerCAmelCase ) with torch.no_grad(): __lowerCAmelCase : Union[str, Any] = model(lowerCAmelCase , sample_posterior=lowerCAmelCase , generator=lowerCAmelCase ).sample __lowerCAmelCase : Dict = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __lowerCAmelCase : List[str] = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif torch_device == "cpu": __lowerCAmelCase : Union[str, Any] = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: __lowerCAmelCase : Tuple = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(lowerCAmelCase , lowerCAmelCase , rtol=1e-2 ) ) @slow class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ) -> int: """simple docstring""" return f'''gaussian_noise_s={seed}_shape={'_'.join([str(lowerCAmelCase ) for s in shape] )}.npy''' def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Union[str, Any]=0 , lowerCAmelCase : Any=(4, 3, 5_12, 5_12) , lowerCAmelCase : Any=False ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Union[str, Any] = torch.floataa if fpaa else torch.floataa __lowerCAmelCase : Optional[int] = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCAmelCase , lowerCAmelCase ) ) ).to(lowerCAmelCase ).to(lowerCAmelCase ) return image def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Optional[Any]="CompVis/stable-diffusion-v1-4" , lowerCAmelCase : int=False ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Optional[Any] = """fp16""" if fpaa else None __lowerCAmelCase : List[str] = torch.floataa if fpaa else torch.floataa __lowerCAmelCase : Dict = AutoencoderKL.from_pretrained( lowerCAmelCase , subfolder="""vae""" , torch_dtype=lowerCAmelCase , revision=lowerCAmelCase , ) model.to(lowerCAmelCase ).eval() return model def SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase : Tuple=0 ) -> Tuple: """simple docstring""" if torch_device == "mps": return torch.manual_seed(lowerCAmelCase ) return torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ) -> List[Any]: """simple docstring""" __lowerCAmelCase : Dict = self.get_sd_vae_model() __lowerCAmelCase : Optional[int] = self.get_sd_image(lowerCAmelCase ) __lowerCAmelCase : List[str] = self.get_generator(lowerCAmelCase ) with torch.no_grad(): __lowerCAmelCase : Optional[Any] = model(lowerCAmelCase , generator=lowerCAmelCase , sample_posterior=lowerCAmelCase ).sample assert sample.shape == image.shape __lowerCAmelCase : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu() __lowerCAmelCase : List[str] = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, 
-0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> Any: """simple docstring""" __lowerCAmelCase : List[Any] = self.get_sd_vae_model(fpaa=lowerCAmelCase ) __lowerCAmelCase : Tuple = self.get_sd_image(lowerCAmelCase , fpaa=lowerCAmelCase ) __lowerCAmelCase : Optional[int] = self.get_generator(lowerCAmelCase ) with torch.no_grad(): __lowerCAmelCase : Dict = model(lowerCAmelCase , generator=lowerCAmelCase , sample_posterior=lowerCAmelCase ).sample assert sample.shape == image.shape __lowerCAmelCase : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu() __lowerCAmelCase : Optional[int] = torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : Any ) -> str: """simple docstring""" __lowerCAmelCase : Union[str, Any] = self.get_sd_vae_model() __lowerCAmelCase : Optional[int] = self.get_sd_image(lowerCAmelCase ) with torch.no_grad(): __lowerCAmelCase : List[Any] = model(lowerCAmelCase ).sample assert sample.shape == image.shape __lowerCAmelCase : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() __lowerCAmelCase : str = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int ) -> str: """simple docstring""" __lowerCAmelCase : Dict = self.get_sd_vae_model() __lowerCAmelCase : Optional[Any] = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): __lowerCAmelCase : Optional[Any] = model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] __lowerCAmelCase : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().cpu() __lowerCAmelCase : Tuple = torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ) -> List[Any]: """simple docstring""" __lowerCAmelCase : Tuple = self.get_sd_vae_model(fpaa=lowerCAmelCase ) __lowerCAmelCase : str = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase ) with torch.no_grad(): __lowerCAmelCase : Dict = model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] __lowerCAmelCase : Any = sample[-1, -2:, :2, -2:].flatten().float().cpu() __lowerCAmelCase : Union[str, Any] = torch.tensor(lowerCAmelCase ) assert torch_all_close(lowerCAmelCase , lowerCAmelCase , 
atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase : Any ) -> Any: """simple docstring""" __lowerCAmelCase : List[Any] = self.get_sd_vae_model(fpaa=lowerCAmelCase ) __lowerCAmelCase : Union[str, Any] = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) , fpaa=lowerCAmelCase ) with torch.no_grad(): __lowerCAmelCase : Union[str, Any] = model.decode(lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __lowerCAmelCase : int = model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" ) def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[Any] ) -> Dict: """simple docstring""" __lowerCAmelCase : Optional[int] = self.get_sd_vae_model() __lowerCAmelCase : Optional[Any] = self.get_sd_image(lowerCAmelCase , shape=(3, 4, 64, 64) ) with torch.no_grad(): __lowerCAmelCase : Optional[Any] = model.decode(lowerCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __lowerCAmelCase : Tuple = model.decode(lowerCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_12, 5_12] assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : int , lowerCAmelCase : List[Any] ) -> Any: """simple docstring""" __lowerCAmelCase : Optional[Any] = self.get_sd_vae_model() __lowerCAmelCase : List[str] = self.get_sd_image(lowerCAmelCase ) __lowerCAmelCase : Any = self.get_generator(lowerCAmelCase ) with torch.no_grad(): __lowerCAmelCase : Optional[int] = model.encode(lowerCAmelCase ).latent_dist __lowerCAmelCase : Union[str, Any] = dist.sample(generator=lowerCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __lowerCAmelCase : Any = sample[0, -1, -3:, -3:].flatten().cpu() __lowerCAmelCase : int = torch.tensor(lowerCAmelCase ) __lowerCAmelCase : str = 3e-3 if torch_device != """mps""" else 1e-2 assert torch_all_close(lowerCAmelCase , lowerCAmelCase , atol=lowerCAmelCase )
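A minimal sketch of the encode/decode round trip these slice tests exercise (the checkpoint path mirrors the one used in the tests; shapes assume the VAE's standard 8x spatial downsampling):

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
image = torch.randn(1, 3, 512, 512)

with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64)
    recon = vae.decode(latents).sample                # (1, 3, 512, 512)

print(latents.shape, recon.shape)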
139
1
import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __lowerCAmelCase ( _a, unittest.TestCase ): lowerCamelCase_ : Any = BioGptTokenizer lowerCamelCase_ : Optional[Any] = False def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case_ : Optional[Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] snake_case_ : Union[str, Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) snake_case_ : Union[str, Any] = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' ) as fp: fp.write(json.dumps(__magic_name__ ) ) with open(self.merges_file , '''w''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def lowerCamelCase (self , __magic_name__ ) -> int: '''simple docstring''' snake_case_ : str = '''lower newer''' snake_case_ : Dict = '''lower newer''' return input_text, output_text def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file ) snake_case_ : Union[str, Any] = '''lower''' snake_case_ : Optional[int] = ['''low''', '''er</w>'''] snake_case_ : Any = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) snake_case_ : Optional[int] = tokens + ['''<unk>'''] snake_case_ : List[str] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) @slow def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Optional[int] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) snake_case_ : List[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ ) snake_case_ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ ) snake_case_ : str = tokenizer.build_inputs_with_special_tokens(__magic_name__ ) snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
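The slow test above exercises the public checkpoint; a hedged usage sketch (the exact subword split depends on the checkpoint's merges table, so the tokenization shown in the comment is only illustrative):

from transformers import BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
tokens = tokenizer.tokenize("sequence builders")          # e.g. ["sequence</w>", "build", "ers</w>"]
ids = tokenizer.encode("sequence builders", add_special_tokens=True)
print(tokens, ids[:5])  # per the test, a special token id is prepended to the sequence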
279
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
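The guarded-import pattern above generalises beyond diffusers; a minimal sketch of the same idea for an arbitrary optional dependency (fancylib is a hypothetical placeholder name):

try:
    import fancylib  # hypothetical optional dependency
    _HAS_FANCYLIB = True
except ImportError:
    _HAS_FANCYLIB = False


def run_fancy():
    if not _HAS_FANCYLIB:
        raise ImportError("run_fancy() needs `fancylib`: pip install fancylib")
    return fancylib.do_work()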
279
1
from __future__ import annotations


def join(separator: str, separated: list[str]) -> str:
    """Concatenate the given strings with `separator`, dropping the trailing separator."""
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
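Usage, assuming the fixed definition above; the separator appended after the last item inside the loop is stripped before returning:

print(join("-", ["a", "b", "c"]))             # a-b-c
print(join(" ", ["You", "are", "amazing!"]))  # You are amazing!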
147
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__) lowerCAmelCase : Optional[Any] ={ '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class a_ ( _lowerCAmelCase ): __A = "gpt_neo" __A = ["past_key_values"] __A = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} def __init__( self : Union[str, Any] , lowercase : Tuple=50_257 , lowercase : Optional[Any]=2_048 , lowercase : Union[str, Any]=2_048 , lowercase : int=24 , lowercase : Optional[Any]=[[["global", "local"], 12]] , lowercase : List[Any]=16 , lowercase : List[str]=None , lowercase : Union[str, Any]=256 , lowercase : Optional[Any]="gelu_new" , lowercase : Any=0.0 , lowercase : List[Any]=0.0 , lowercase : Any=0.0 , lowercase : str=0.1 , lowercase : Dict=1e-5 , lowercase : List[str]=0.02 , lowercase : Union[str, Any]=True , lowercase : int=50_256 , lowercase : Union[str, Any]=50_256 , **lowercase : Dict , ): """simple docstring""" lowercase_ :str = vocab_size lowercase_ :Tuple = max_position_embeddings lowercase_ :Tuple = hidden_size lowercase_ :List[str] = num_layers lowercase_ :int = num_heads lowercase_ :Union[str, Any] = intermediate_size lowercase_ :Tuple = window_size lowercase_ :Any = activation_function lowercase_ :Tuple = resid_dropout lowercase_ :Any = embed_dropout lowercase_ :str = attention_dropout lowercase_ :List[str] = classifier_dropout lowercase_ :List[Any] = layer_norm_epsilon lowercase_ :List[str] = initializer_range lowercase_ :int = use_cache lowercase_ :Tuple = bos_token_id lowercase_ :Optional[Any] = eos_token_id lowercase_ :int = attention_types lowercase_ :Tuple = self.expand_attention_types_params(lowercase ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " F'but is `len(config.attention_layers) = {len(self.attention_layers )}`, ' F'`config.num_layers = {self.num_layers}`. ' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase ) @staticmethod def lowercase__ ( lowercase : str ): """simple docstring""" lowercase_ :Union[str, Any] = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def UpperCAmelCase_ ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Optional[int] ,__lowerCamelCase : Optional[Any] ,__lowerCamelCase : Dict ): import torch lowercase_ :List[str] = input.size() lowercase_ :Union[str, Any] = len(__lowerCamelCase ) lowercase_ :Any = shape[dimension] lowercase_ :str = torch.arange(0 ,__lowerCamelCase ,__lowerCamelCase ) lowercase_ :Union[str, Any] = torch.div(sizedim - size ,__lowerCamelCase ,rounding_mode="floor" ) + 1 lowercase_ :int = torch.arange(__lowerCamelCase ) + low_indices[:min_length][:, None] lowercase_ :List[Any] = [slice(__lowerCamelCase )] * rank lowercase_ :int = indices lowercase_ :Dict = input[s] lowercase_ :List[str] = list(range(0 ,rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__lowerCamelCase ) def UpperCAmelCase_ ( __lowerCamelCase : List[str] ,__lowerCamelCase : Any ): import torch lowercase_ :List[Any] = torch.arange(1 ,__lowerCamelCase ) lowercase_ :int = torch.remainder(__lowerCamelCase ,__lowerCamelCase ) lowercase_ :Optional[int] = remainders == 0 lowercase_ :int = candidates[divisor_indices] lowercase_ :Tuple = torch.max(__lowerCamelCase ) return largest_divisor, torch.div(__lowerCamelCase ,__lowerCamelCase ,rounding_mode="floor" ) class a_ ( _lowerCAmelCase ): @property def lowercase__ ( self : str ): """simple docstring""" lowercase_ :int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(lowercase , direction="inputs" ) lowercase_ :Union[str, Any] = {0: "batch", 1: "past_sequence + sequence"} else: lowercase_ :str = {0: "batch", 1: "sequence"} return common_inputs @property def lowercase__ ( self : Tuple ): """simple docstring""" return self._config.num_heads def lowercase__ ( self : List[str] , lowercase : PreTrainedTokenizer , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ): """simple docstring""" lowercase_ :List[str] = super(lowercase , self ).generate_dummy_inputs( lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase ) # We need to order the input in the way they appears in the forward() lowercase_ :Tuple = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowercase_ , lowercase_ :Tuple = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowercase_ :Any = seqlen + 2 lowercase_ :List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowercase_ :Dict = [ (torch.zeros(lowercase ), torch.zeros(lowercase )) for _ in range(self.num_layers ) ] lowercase_ :Tuple = common_inputs["attention_mask"] if self.use_past: lowercase_ :Optional[int] = ordered_inputs["attention_mask"].dtype lowercase_ :List[Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(lowercase , lowercase , dtype=lowercase )] , dim=1 ) return ordered_inputs @property def lowercase__ ( self : int ): """simple docstring""" return 13
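The sliding-window helper in the ONNX section above appears to reimplement a torch.Tensor.unfold-style operation (GPT-Neo's local attention windows); a small sketch of the built-in it resembles, for comparison:

import torch

x = torch.arange(10).view(1, 10)   # one sequence of length 10
windows = x.unfold(1, 4, 2)        # overlapping windows along dim 1: size 4, stride 2
print(windows.shape)               # torch.Size([1, 4, 4])
print(windows[0])
# tensor([[0, 1, 2, 3],
#         [2, 3, 4, 5],
#         [4, 5, 6, 7],
#         [6, 7, 8, 9]])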
147
1
from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__ : '''simple docstring''' def __init__( self :List[Any] , a :Dict , a :Any=3 , a :Any=3_2 , a :Optional[Any]=3 , a :str=1_0 , a :Union[str, Any]=[1_0, 2_0, 3_0, 4_0] , a :Optional[Any]=[1, 1, 2, 1] , a :Optional[Any]=True , a :Dict=True , a :Tuple="relu" , a :List[str]=3 , a :Tuple=None , ) -> Tuple: __UpperCamelCase : Optional[Any] = parent __UpperCamelCase : Dict = batch_size __UpperCamelCase : int = image_size __UpperCamelCase : Dict = num_channels __UpperCamelCase : Optional[int] = embeddings_size __UpperCamelCase : List[Any] = hidden_sizes __UpperCamelCase : Optional[Any] = depths __UpperCamelCase : Optional[int] = is_training __UpperCamelCase : Union[str, Any] = use_labels __UpperCamelCase : Optional[int] = hidden_act __UpperCamelCase : Tuple = num_labels __UpperCamelCase : Tuple = scope __UpperCamelCase : Dict = len(a ) def _lowerCamelCase ( self :Optional[int] ) -> Any: __UpperCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCamelCase : List[str] = None if self.use_labels: __UpperCamelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels ) __UpperCamelCase : List[Any] = self.get_config() return config, pixel_values, labels def _lowerCamelCase ( self :Union[str, Any] ) -> int: return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _lowerCamelCase ( self :List[Any] , a :Dict , a :int , a :Optional[Any] ) -> Tuple: __UpperCamelCase : str = TFResNetModel(config=a ) __UpperCamelCase : Union[str, Any] = model(a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def _lowerCamelCase ( self :Union[str, Any] , a :Optional[int] , a :List[str] , a :Optional[Any] ) -> Any: __UpperCamelCase : str = self.num_labels __UpperCamelCase : Optional[int] = TFResNetForImageClassification(a ) __UpperCamelCase : List[str] = model(a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self :Optional[int] ) -> List[str]: __UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() __UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Union[str, Any] = config_and_inputs __UpperCamelCase : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class lowerCamelCase__ ( __lowercase , __lowercase , unittest.TestCase): '''simple docstring''' _A = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() 
else () _A = ( {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification} if is_tf_available() else {} ) _A = False _A = False _A = False _A = False _A = False def _lowerCamelCase ( self :int ) -> List[str]: __UpperCamelCase : Union[str, Any] = TFResNetModelTester(self ) __UpperCamelCase : List[Any] = ConfigTester(self , config_class=a , has_text_modality=a ) def _lowerCamelCase ( self :int ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCamelCase ( self :str ) -> Optional[Any]: return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def _lowerCamelCase ( self :Tuple ) -> Tuple: pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def _lowerCamelCase ( self :List[Any] ) -> List[str]: pass def _lowerCamelCase ( self :Optional[int] ) -> Tuple: __UpperCamelCase , __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCamelCase : Dict = model_class(a ) __UpperCamelCase : Union[str, Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCamelCase : Dict = [*signature.parameters.keys()] __UpperCamelCase : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , a ) def _lowerCamelCase ( self :List[str] ) -> List[str]: __UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _lowerCamelCase ( self :Optional[Any] ) -> Tuple: def check_hidden_states_output(a :Optional[Any] , a :Optional[int] , a :List[str] ): __UpperCamelCase : int = model_class(a ) __UpperCamelCase : int = model(**self._prepare_for_class(a , a ) ) __UpperCamelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCamelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(a ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __UpperCamelCase , __UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCamelCase : str = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: __UpperCamelCase : int = layer_type __UpperCamelCase : int = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCamelCase : int = True check_hidden_states_output(a , a , a ) def _lowerCamelCase ( self :Union[str, Any] ) -> Dict: __UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a ) @slow def _lowerCamelCase ( self :Dict ) -> Dict: for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase : Optional[Any] = TFResNetModel.from_pretrained(a ) self.assertIsNotNone(a ) def _SCREAMING_SNAKE_CASE ( ) -> int: '''simple docstring''' 
__UpperCamelCase : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_tf @require_vision class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' @cached_property def _lowerCamelCase ( self :Optional[Any] ) -> Tuple: return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _lowerCamelCase ( self :Optional[int] ) -> Optional[int]: __UpperCamelCase : int = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __UpperCamelCase : List[Any] = self.default_image_processor __UpperCamelCase : List[str] = prepare_img() __UpperCamelCase : List[str] = image_processor(images=a , return_tensors="tf" ) # forward pass __UpperCamelCase : Dict = model(**a ) # verify the logits __UpperCamelCase : Dict = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , a ) __UpperCamelCase : Union[str, Any] = tf.constant([-11.1069, -9.7877, -8.3777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , a , atol=1E-4 ) )
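A minimal TF inference sketch matching the integration test above. The checkpoint name is an assumption: the test reads it from TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0], commonly microsoft/resnet-50.

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits                 # shape (1, 1000)
print(int(tf.argmax(logits, axis=-1)[0]))       # predicted ImageNet class id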
232
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch lowercase : List[str] = logging.get_logger(__name__) class lowerCamelCase__ : '''simple docstring''' def __init__( self :str , a :str = None , a :uuid.UUID = None , a :Tuple=None , a :Optional[Any]=None ) -> str: if not conversation_id: __UpperCamelCase : Dict = uuid.uuida() if past_user_inputs is None: __UpperCamelCase : List[Any] = [] if generated_responses is None: __UpperCamelCase : Any = [] __UpperCamelCase : uuid.UUID = conversation_id __UpperCamelCase : List[str] = past_user_inputs __UpperCamelCase : List[str] = generated_responses __UpperCamelCase : Optional[str] = text def __eq__( self :Optional[int] , a :Optional[int] ) -> Union[str, Any]: if not isinstance(a , a ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def _lowerCamelCase ( self :Optional[int] , a :str , a :bool = False ) -> str: if self.new_user_input: if overwrite: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' f'with: "{text}".' ) __UpperCamelCase : Any = text else: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: __UpperCamelCase : int = text def _lowerCamelCase ( self :List[str] ) -> int: if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __UpperCamelCase : Dict = None def _lowerCamelCase ( self :Optional[int] , a :str ) -> Optional[int]: self.generated_responses.append(a ) def _lowerCamelCase ( self :int ) -> Optional[Any]: for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self :List[str] ) -> List[Any]: __UpperCamelCase : Any = f'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): __UpperCamelCase : str = "user" if is_user else "bot" output += f'{name} >> {text} \n' return output @add_end_docstrings( __lowercase , R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class lowerCamelCase__ ( __lowercase): '''simple docstring''' def __init__( self :Tuple , *a :Tuple , **a :List[str] ) -> Tuple: super().__init__(*a , **a ) if self.tokenizer.pad_token_id is None: __UpperCamelCase : int = self.tokenizer.eos_token def _lowerCamelCase ( self :Optional[int] , a :List[Any]=None , a :str=None , a :int=None , **a :str ) -> List[str]: __UpperCamelCase : List[str] = {} __UpperCamelCase : List[str] = {} __UpperCamelCase : str = {} if min_length_for_response is not None: __UpperCamelCase : Optional[Any] = min_length_for_response if minimum_tokens is not None: __UpperCamelCase : List[str] = minimum_tokens if "max_length" in generate_kwargs: __UpperCamelCase : List[Any] = generate_kwargs["max_length"] # self.max_length = generate_kwargs.get("max_length", 
self.model.config.max_length) if clean_up_tokenization_spaces is not None: __UpperCamelCase : List[Any] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(a ) return preprocess_params, forward_params, postprocess_params def __call__( self :Dict , a :Union[Conversation, List[Conversation]] , a :List[Any]=0 , **a :Any ) -> Union[str, Any]: __UpperCamelCase : Optional[int] = super().__call__(a , num_workers=a , **a ) if isinstance(a , a ) and len(a ) == 1: return outputs[0] return outputs def _lowerCamelCase ( self :Tuple , a :Conversation , a :Dict=3_2 ) -> Dict[str, Any]: if not isinstance(a , a ): raise ValueError("ConversationalPipeline, expects Conversation as inputs" ) if conversation.new_user_input is None: raise ValueError( f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ' "Add user inputs with the conversation's `add_user_input` method" ) if hasattr(self.tokenizer , "_build_conversation_input_ids" ): __UpperCamelCase : str = self.tokenizer._build_conversation_input_ids(a ) else: # If the tokenizer cannot handle conversations, we default to only the old version __UpperCamelCase : Optional[Any] = self._legacy_parse_and_tokenize(a ) if self.framework == "pt": __UpperCamelCase : Dict = torch.LongTensor([input_ids] ) elif self.framework == "tf": __UpperCamelCase : Any = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def _lowerCamelCase ( self :Any , a :List[Any] , a :Optional[Any]=1_0 , **a :Tuple ) -> List[str]: __UpperCamelCase : Union[str, Any] = generate_kwargs.get("max_length" , self.model.config.max_length ) __UpperCamelCase : Dict = model_inputs["input_ids"].shape[1] if max_length - minimum_tokens < n: logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) __UpperCamelCase : Dict = max_length - minimum_tokens __UpperCamelCase : Optional[int] = model_inputs["input_ids"][:, -trim:] if "attention_mask" in model_inputs: __UpperCamelCase : Dict = model_inputs["attention_mask"][:, -trim:] __UpperCamelCase : List[str] = model_inputs.pop("conversation" ) __UpperCamelCase : Optional[int] = max_length __UpperCamelCase : str = self.model.generate(**a , **a ) if self.model.config.is_encoder_decoder: __UpperCamelCase : List[str] = 1 else: __UpperCamelCase : Optional[int] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def _lowerCamelCase ( self :List[Any] , a :str , a :Optional[int]=True ) -> Union[str, Any]: __UpperCamelCase : List[str] = model_outputs["output_ids"] __UpperCamelCase : Any = self.tokenizer.decode( output_ids[0] , skip_special_tokens=a , clean_up_tokenization_spaces=a , ) __UpperCamelCase : int = model_outputs["conversation"] conversation.mark_processed() conversation.append_response(a ) return conversation def _lowerCamelCase ( self :str , a :Conversation ) -> Dict: __UpperCamelCase : int = self.tokenizer.eos_token_id __UpperCamelCase : Any = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(a , add_special_tokens=a ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(a , add_special_tokens=a ) ) if len(a ) > self.tokenizer.model_max_length: __UpperCamelCase : Union[str, Any] = input_ids[-self.tokenizer.model_max_length :] return input_ids
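A hedged usage sketch of the pipeline defined above (the DialoGPT checkpoint is illustrative; any conversational model works):

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")

conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

conversation.add_user_input("Is it an action movie?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])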
232
1
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def snake_case_ ( snake_case ) -> List[Any]: lowercase__: Optional[int] = filter(lambda snake_case : p.requires_grad , model.parameters() ) lowercase__: Any = sum([np.prod(p.size() ) for p in model_parameters] ) return params __lowerCAmelCase = logging.getLogger(__name__) def snake_case_ ( snake_case , snake_case ) -> Union[str, Any]: if metric == "rouge2": lowercase__: int = '{val_avg_rouge2:.4f}-{step_count}' elif metric == "bleu": lowercase__: Union[str, Any] = '{val_avg_bleu:.4f}-{step_count}' elif metric == "em": lowercase__: int = '{val_avg_em:.4f}-{step_count}' else: raise NotImplementedError( f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this' ' function.' ) lowercase__: List[Any] = ModelCheckpoint( dirpath=snake_case , filename=snake_case , monitor=f'val_{metric}' , mode='max' , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def snake_case_ ( snake_case , snake_case ) -> Dict: return EarlyStopping( monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=snake_case , verbose=snake_case , ) class __a ( pl.Callback ): def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: '''simple docstring''' lowercase__: Tuple = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(lowerCAmelCase__ ) @rank_zero_only def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> None: '''simple docstring''' logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' ) lowercase__: Union[str, Any] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} ) # Log results lowercase__: List[Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": lowercase__: str = od / 'test_results.txt' lowercase__: Tuple = od / 'test_generations.txt' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
lowercase__: Optional[Any] = od / F'{type_path}_results/{trainer.global_step:05d}.txt' lowercase__: Any = od / F'{type_path}_generations/{trainer.global_step:05d}.txt' results_file.parent.mkdir(exist_ok=lowerCAmelCase__ ) generations_file.parent.mkdir(exist_ok=lowerCAmelCase__ ) with open(lowerCAmelCase__ , 'a+' ) as writer: for key in sorted(lowerCAmelCase__ ): if key in ["log", "progress_bar", "preds"]: continue lowercase__: Optional[Any] = metrics[key] if isinstance(lowerCAmelCase__ , torch.Tensor ): lowercase__: Dict = val.item() lowercase__: Optional[int] = F'{key}: {val:.6f}\n' writer.write(lowerCAmelCase__ ) if not save_generations: return if "preds" in metrics: lowercase__: Tuple = '\n'.join(metrics['preds'] ) generations_file.open('w+' ).write(lowerCAmelCase__ ) @rank_zero_only def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: '''simple docstring''' try: lowercase__: int = pl_module.model.model.num_parameters() except AttributeError: lowercase__: List[str] = pl_module.model.num_parameters() lowercase__: List[Any] = count_trainable_parameters(lowerCAmelCase__ ) # mp stands for million parameters trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} ) @rank_zero_only def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) return self._write_logs(lowerCAmelCase__ , lowerCAmelCase__ , 'test' ) @rank_zero_only def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
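A short sketch of wiring these callbacks into a pl.Trainer. The factory names below are descriptive stand-ins for the two helpers defined above, whose identifiers were mangled in this dump:

import pytorch_lightning as pl

# Stand-ins for the checkpoint / early-stopping factories defined above.
checkpoint_cb = get_checkpoint_callback("outputs", metric="rouge2")  # saves top-3 on val_avg_rouge2
early_stop_cb = get_early_stopping_callback(metric="rouge2", patience=3)

trainer = pl.Trainer(callbacks=[checkpoint_cb, early_stop_cb], max_epochs=10)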
371
from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging __lowerCAmelCase = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class __a ( __UpperCamelCase ): def __init__( self , lowerCAmelCase__ = 101 ) -> Any: '''simple docstring''' lowercase__: Any = length def __len__( self ) -> List[Any]: '''simple docstring''' return self.length def __getitem__( self , lowerCAmelCase__ ) -> int: '''simple docstring''' return i class __a : def __call__( self , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' return {"input_ids": torch.tensor(lowerCAmelCase__ ), "labels": torch.tensor(lowerCAmelCase__ )} class __a ( nn.Module ): def __init__( self ) -> Tuple: '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. lowercase__: List[str] = nn.Linear(120 , 80 ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> int: '''simple docstring''' if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class __a ( __UpperCamelCase ): @require_torch_neuroncore def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' lowercase__: int = F'--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() lowercase__: Tuple = self.get_auto_remove_tmp_dir() lowercase__: Optional[int] = F'--output_dir {output_dir}'.split() lowercase__: int = ['torchrun'] + distributed_args + args execute_subprocess_async(lowerCAmelCase__ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class __a ( __UpperCamelCase ): @require_torch_multi_gpu def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' lowercase__: List[str] = F'--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n '.split() lowercase__: Tuple = self.get_auto_remove_tmp_dir() lowercase__: List[str] = F'--output_dir {output_dir}'.split() lowercase__: int = ['torchrun'] + distributed_args + args execute_subprocess_async(lowerCAmelCase__ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py __lowerCAmelCase = HfArgumentParser((TrainingArguments,)) __lowerCAmelCase = parser.parse_args_into_dataclasses()[0] logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, ''' F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}''' ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. 
(this is crucial for prediction for instance) for dataset_length in [1_01, 40, 7]: __lowerCAmelCase = DummyDataset(dataset_length) def snake_case_ ( snake_case ) -> Dict: lowercase__: str = list(range(len(snake_case ) ) ) lowercase__: Tuple = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( 'Predictions and/or labels do not match expected results:\n - predictions: ' f'{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}' ) return {"success": success} __lowerCAmelCase = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) __lowerCAmelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __lowerCAmelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __lowerCAmelCase = 2 __lowerCAmelCase = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) __lowerCAmelCase = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) __lowerCAmelCase = None
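The crucial property the script checks is that distributed evaluation hands back every sample exactly once and in dataset order; a self-contained sketch of that assertion:

import numpy as np


def check_sequential(predictions: np.ndarray, label_ids: np.ndarray) -> bool:
    # Gathered predictions/labels must be exactly 0..N-1 in dataset order.
    expected = list(range(len(predictions)))
    return predictions.tolist() == expected and label_ids.tolist() == expected


print(check_sequential(np.arange(7), np.arange(7)))          # True
print(check_sequential(np.array([1, 0, 2]), np.arange(3)))   # False: out of order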
288
0
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->int: snake_case_ = ['a', 'b', 'c'] # Defaults to last layer if both are None snake_case_ = get_aligned_output_features_output_indices(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , ['''c'''] ) self.assertEqual(_UpperCamelCase , [2] ) # Out indices set to match out features snake_case_ = get_aligned_output_features_output_indices(['''a''', '''c'''] , _UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , ['''a''', '''c'''] ) self.assertEqual(_UpperCamelCase , [0, 2] ) # Out features set to match out indices snake_case_ = get_aligned_output_features_output_indices(_UpperCamelCase , [0, 2] , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , ['''a''', '''c'''] ) self.assertEqual(_UpperCamelCase , [0, 2] ) # Out features selected from negative indices snake_case_ = get_aligned_output_features_output_indices(_UpperCamelCase , [-3, -1] , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , ['''a''', '''c'''] ) self.assertEqual(_UpperCamelCase , [-3, -1] ) def snake_case__( self : Any ) ->Optional[int]: with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , _UpperCamelCase ) # Out features must be a list with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] ) # Out features must be a subset of stage names with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] ) # Out indices must be a list or tuple with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(_UpperCamelCase , 0 , ['''a''', '''b'''] ) # Out indices must be a subset of stage names with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(_UpperCamelCase , (0, 1) , ['''a'''] ) # Out features and out indices must be the same length with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] ) # Out features should match out indices with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] ) # Out features and out indices should be in order with self.assertRaises(_UpperCamelCase ): verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] ) # Check passes with valid inputs verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] ) def snake_case__( self : Dict ) ->List[Any]: snake_case_ = BackboneMixin() snake_case_ = ['a', 'b', 'c'] snake_case_ = ['a', 'c'] snake_case_ = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['''a''', '''c'''] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly snake_case_ = ['a', 'b'] self.assertEqual(backbone.out_features , ['''a''', '''b'''] ) self.assertEqual(backbone.out_indices , [0, 1] ) snake_case_ = [-3, -1] self.assertEqual(backbone.out_features , ['''a''', '''c'''] ) self.assertEqual(backbone.out_indices , [-3, -1] )
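A worked illustration of the alignment helper the test above exercises (import path and signature taken from the calls in the test):

from transformers.utils.backbone_utils import get_aligned_output_features_output_indices

stage_names = ["a", "b", "c"]

# Both None -> defaults to the last stage.
print(get_aligned_output_features_output_indices(None, None, stage_names))      # (['c'], [2])

# Negative indices resolve against the stage list.
print(get_aligned_output_features_output_indices(None, [-3, -1], stage_names))  # (['a', 'c'], [-3, -1])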
8
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes that only walks odd numbers; returns all primes below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Find the prime below `ceiling` writable as the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
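Quick sanity check, assuming the fixed definitions above:

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest consecutive-prime sum below 100.
assert solution(100) == 41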
251
0
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy UpperCAmelCase__ = logging.getLogger(__name__) def _UpperCAmelCase ( __lowerCamelCase : torch.nn.Module , __lowerCamelCase : BnbQuantizationConfig , __lowerCamelCase : Union[str, os.PathLike] = None , __lowerCamelCase : Optional[Dict[str, Union[int, str, torch.device]]] = None , __lowerCamelCase : Optional[List[str]] = None , __lowerCamelCase : Optional[Dict[Union[int, str], Union[int, str]]] = None , __lowerCamelCase : Optional[Union[str, os.PathLike]] = None , __lowerCamelCase : bool = False , ) -> Any: _snake_case = bnb_quantization_config.load_in_abit _snake_case = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) _snake_case = [] # custom device map if isinstance(__lowerCamelCase , __lowerCamelCase ) and len(device_map.keys() ) > 1: _snake_case = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: _snake_case = get_keys_to_not_convert(__lowerCamelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__lowerCamelCase ) _snake_case = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: _snake_case = [] _snake_case = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__lowerCamelCase ) # compatibility with peft _snake_case = load_in_abit _snake_case = load_in_abit _snake_case = get_parameter_device(__lowerCamelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. 
''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) _snake_case = replace_with_bnb_layers(__lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase ) # convert param to the right dtype _snake_case = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: _snake_case = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) _snake_case = getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__lowerCamelCase ): param.to(__lowerCamelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( f'''The model device type is {model_device.type}. However, cuda is needed for quantization.''' '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' ) else: with init_empty_weights(): _snake_case = replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , modules_to_not_convert=__lowerCamelCase ) _snake_case = get_quantized_model_device_map( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , max_memory=__lowerCamelCase , no_split_module_classes=__lowerCamelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): _snake_case = True _snake_case = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__lowerCamelCase , offload_state_dict=__lowerCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__lowerCamelCase , device_map=__lowerCamelCase , offload_dir=__lowerCamelCase ) def _UpperCAmelCase ( __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str=None , __lowerCamelCase : str=None , __lowerCamelCase : Any=None ) -> Union[str, Any]: if device_map is None: if torch.cuda.is_available(): _snake_case = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. 
A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(__lowerCamelCase , __lowerCamelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) _snake_case = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) _snake_case = {} _snake_case = special_dtypes _snake_case = no_split_module_classes _snake_case = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": _snake_case = get_balanced_memory( __lowerCamelCase , low_zero=(device_map == '''balanced_low_0''') , max_memory=__lowerCamelCase , **__lowerCamelCase , ) _snake_case = max_memory _snake_case = infer_auto_device_map(__lowerCamelCase , **__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): # check if don't have any quantized module on the cpu _snake_case = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules _snake_case = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Dict=None ) -> List[Any]: if modules_to_not_convert is None: _snake_case = [] _snake_case , _snake_case = _replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any]=None , __lowerCamelCase : Dict=None , ) -> Dict: _snake_case = False for name, module in model.named_children(): if current_key_name is None: _snake_case = [] current_key_name.append(__lowerCamelCase ) if isinstance(__lowerCamelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` _snake_case = '''.'''.join(__lowerCamelCase ) _snake_case = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: _snake_case = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: _snake_case = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__lowerCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: _snake_case = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) _snake_case = module.weight.data if module.bias is not None: _snake_case = module.bias.data bnb_module.requires_grad_(__lowerCamelCase ) setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _snake_case = True if len(list(module.children() ) ) > 0: _snake_case , _snake_case = _replace_with_bnb_layers( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _snake_case = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _UpperCAmelCase ( __lowerCamelCase : Optional[int] ) -> str: # Create a copy of the model with init_empty_weights(): _snake_case = deepcopy(__lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` _snake_case = find_tied_parameters(__lowerCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__lowerCamelCase , __lowerCamelCase ): _snake_case = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: _snake_case = sum(__lowerCamelCase , [] ) _snake_case = len(__lowerCamelCase ) > 0 # Check if it is a base model _snake_case = False if hasattr(__lowerCamelCase , '''base_model_prefix''' ): _snake_case = not hasattr(__lowerCamelCase , model.base_model_prefix ) # Ignore this for base 
models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _snake_case = list(model.named_children() ) _snake_case = [list_modules[-1][0]] # add last module together with tied weights _snake_case = set(__lowerCamelCase ) - set(__lowerCamelCase ) _snake_case = list(set(__lowerCamelCase ) ) + list(__lowerCamelCase ) # remove ".weight" from the keys _snake_case = ['''.weight''', '''.bias'''] _snake_case = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _snake_case = name.replace(__lowerCamelCase , '''''' ) filtered_module_names.append(__lowerCamelCase ) return filtered_module_names def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> Dict: for m in model.modules(): if isinstance(__lowerCamelCase , bnb.nn.Linearabit ): return True return False def _UpperCAmelCase ( __lowerCamelCase : nn.Module ) -> Tuple: return next(parameter.parameters() ).device def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Dict ) -> List[Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , 0 , dtype=__lowerCamelCase , value=__lowerCamelCase ) _snake_case = param_name _snake_case = model if "." in tensor_name: _snake_case = tensor_name.split('''.''' ) for split in splits[:-1]: _snake_case = getattr(__lowerCamelCase , __lowerCamelCase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) _snake_case = new_module _snake_case = splits[-1] # offload weights _snake_case = False offload_weight(module._parameters[tensor_name] , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase , ) else: offload_weight(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index=__lowerCamelCase ) offload_weight(__lowerCamelCase , param_name.replace('''weight''' , '''SCB''' ) , __lowerCamelCase , index=__lowerCamelCase ) set_module_tensor_to_device(__lowerCamelCase , __lowerCamelCase , '''meta''' , dtype=__lowerCamelCase , value=torch.empty(*param.size() ) )
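The heart of the _replace_with_bnb_layers logic above is a recursive swap of nn.Linear children. A minimal sketch of that pattern without bitsandbytes, using a stand-in subclass; StubQuantLinear, replace_linears, and the exact-name skip matching are all assumptions for illustration, not this module's real API.

import torch.nn as nn

class StubQuantLinear(nn.Linear):
    """Hypothetical stand-in for a bnb quantized linear layer."""

def replace_linears(model, skip, prefix=""):
    # Recurse over named children, swapping plain Linear layers in place
    # unless their fully qualified name is in the skip set.
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            new = StubQuantLinear(child.in_features, child.out_features, bias=child.bias is not None)
            new.weight.data = child.weight.data
            if child.bias is not None:
                new.bias.data = child.bias.data
            setattr(model, name, new)
        else:
            replace_linears(child, skip, full_name)
    return model

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
replace_linears(net, skip={"2"})  # keep the head in full precision
assert isinstance(net[0], StubQuantLinear) and not isinstance(net[2], StubQuantLinear)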
40
"""simple docstring""" import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class lowerCAmelCase__ ( A_ ): __a = 42 __a = jnp.floataa __a = True def lowercase ( self : Tuple ): super().setup() _snake_case = nn.Dense(5 , dtype=self.dtype ) def __call__( self : str , *_lowerCamelCase : int , **_lowerCamelCase : Any ): _snake_case = super().__call__(*_lowerCamelCase , **_lowerCamelCase ) _snake_case = self.cls(outputs[2] ) return outputs[:2] + (cls_out,) class lowerCAmelCase__ ( A_ ): __a = FlaxBigBirdForNaturalQuestionsModule def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ) -> Any: def cross_entropy(__lowerCamelCase : Union[str, Any] , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=None ): _snake_case = logits.shape[-1] _snake_case = (labels[..., None] == jnp.arange(__lowerCamelCase )[None]).astype('''f4''' ) _snake_case = jax.nn.log_softmax(__lowerCamelCase , axis=-1 ) _snake_case = -jnp.sum(labels * logits , axis=-1 ) if reduction is not None: _snake_case = reduction(__lowerCamelCase ) return loss _snake_case = partial(__lowerCamelCase , reduction=jnp.mean ) _snake_case = cross_entropy(__lowerCamelCase , __lowerCamelCase ) _snake_case = cross_entropy(__lowerCamelCase , __lowerCamelCase ) _snake_case = cross_entropy(__lowerCamelCase , __lowerCamelCase ) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class lowerCAmelCase__ : __a = "google/bigbird-roberta-base" __a = 3000 __a = 10500 __a = 128 __a = 3 __a = 1 __a = 5 # tx_args __a = 3e-5 __a = 0.0 __a = 20000 __a = 0.0095 __a = "bigbird-roberta-natural-questions" __a = "training-expt" __a = "data/nq-training.jsonl" __a = "data/nq-validation.jsonl" def lowercase ( self : Optional[Any] ): os.makedirs(self.base_dir , exist_ok=_lowerCamelCase ) _snake_case = os.path.join(self.base_dir , self.save_dir ) _snake_case = self.batch_size_per_device * jax.device_count() @dataclass class lowerCAmelCase__ : __a = 42 __a = 4096 # no dynamic padding on TPUs def __call__( self : Dict , _lowerCamelCase : Any ): _snake_case = self.collate_fn(_lowerCamelCase ) _snake_case = jax.tree_util.tree_map(_lowerCamelCase , _lowerCamelCase ) return batch def lowercase ( self : Dict , _lowerCamelCase : str ): _snake_case , _snake_case = self.fetch_inputs(features['''input_ids'''] ) _snake_case = { '''input_ids''': jnp.array(_lowerCamelCase , dtype=jnp.intaa ), '''attention_mask''': jnp.array(_lowerCamelCase , dtype=jnp.intaa ), '''start_labels''': jnp.array(features['''start_token'''] , dtype=jnp.intaa ), '''end_labels''': jnp.array(features['''end_token'''] , dtype=jnp.intaa ), '''pooled_labels''': jnp.array(features['''category'''] , dtype=jnp.intaa ), } return batch def lowercase ( self : List[Any] , _lowerCamelCase : list ): _snake_case = [self._fetch_inputs(_lowerCamelCase ) for ids in input_ids] return zip(*_lowerCamelCase ) def lowercase ( self : 
Optional[Any] , _lowerCamelCase : list ): _snake_case = [1 for _ in range(len(_lowerCamelCase ) )] while len(_lowerCamelCase ) < self.max_length: input_ids.append(self.pad_id ) attention_mask.append(0 ) return input_ids, attention_mask def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str]=None ) -> str: if seed is not None: _snake_case = dataset.shuffle(seed=__lowerCamelCase ) for i in range(len(__lowerCamelCase ) // batch_size ): _snake_case = dataset[i * batch_size : (i + 1) * batch_size] yield dict(__lowerCamelCase ) @partial(jax.pmap , axis_name='''batch''' ) def _UpperCAmelCase ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , **__lowerCamelCase : Optional[Any] ) -> Union[str, Any]: def loss_fn(__lowerCamelCase : Union[str, Any] ): _snake_case = model_inputs.pop('''start_labels''' ) _snake_case = model_inputs.pop('''end_labels''' ) _snake_case = model_inputs.pop('''pooled_labels''' ) _snake_case = state.apply_fn(**__lowerCamelCase , params=__lowerCamelCase , dropout_rng=__lowerCamelCase , train=__lowerCamelCase ) _snake_case , _snake_case , _snake_case = outputs return state.loss_fn( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) _snake_case , _snake_case = jax.random.split(__lowerCamelCase ) _snake_case = jax.value_and_grad(__lowerCamelCase ) _snake_case , _snake_case = grad_fn(state.params ) _snake_case = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) _snake_case = jax.lax.pmean(__lowerCamelCase , '''batch''' ) _snake_case = state.apply_gradients(grads=__lowerCamelCase ) return state, metrics, new_drp_rng @partial(jax.pmap , axis_name='''batch''' ) def _UpperCAmelCase ( __lowerCamelCase : str , **__lowerCamelCase : List[str] ) -> Any: _snake_case = model_inputs.pop('''start_labels''' ) _snake_case = model_inputs.pop('''end_labels''' ) _snake_case = model_inputs.pop('''pooled_labels''' ) _snake_case = state.apply_fn(**__lowerCamelCase , params=state.params , train=__lowerCamelCase ) _snake_case , _snake_case , _snake_case = outputs _snake_case = state.loss_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _snake_case = jax.lax.pmean({'''loss''': loss} , axis_name='''batch''' ) return metrics class lowerCAmelCase__ ( train_state.TrainState ): __a = struct.field(pytree_node=A_ ) @dataclass class lowerCAmelCase__ : __a = 42 __a = 42 __a = 42 __a = 42 __a = 42 __a = 42 __a = None def lowercase ( self : List[Any] , _lowerCamelCase : str , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict=None ): _snake_case = model.params _snake_case = TrainState.create( apply_fn=model.__call__ , params=_lowerCamelCase , tx=_lowerCamelCase , loss_fn=_lowerCamelCase , ) if ckpt_dir is not None: _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = restore_checkpoint(_lowerCamelCase , _lowerCamelCase ) _snake_case = { '''lr''': args.lr, '''init_lr''': args.init_lr, '''warmup_steps''': args.warmup_steps, '''num_train_steps''': num_train_steps, '''weight_decay''': args.weight_decay, } _snake_case , _snake_case = build_tx(**_lowerCamelCase ) _snake_case = train_state.TrainState( step=_lowerCamelCase , apply_fn=model.__call__ , params=_lowerCamelCase , tx=_lowerCamelCase , opt_state=_lowerCamelCase , ) _snake_case = args _snake_case = data_collator _snake_case = lr _snake_case = params _snake_case = jax_utils.replicate(_lowerCamelCase ) return state 
def lowercase ( self : List[str] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str ): _snake_case = self.args _snake_case = len(_lowerCamelCase ) // args.batch_size _snake_case = jax.random.PRNGKey(0 ) _snake_case = jax.random.split(_lowerCamelCase , jax.device_count() ) for epoch in range(args.max_epochs ): _snake_case = jnp.array(0 , dtype=jnp.floataa ) _snake_case = get_batched_dataset(_lowerCamelCase , args.batch_size , seed=_lowerCamelCase ) _snake_case = 0 for batch in tqdm(_lowerCamelCase , total=_lowerCamelCase , desc=f'''Running EPOCH-{epoch}''' ): _snake_case = self.data_collator(_lowerCamelCase ) _snake_case , _snake_case , _snake_case = self.train_step_fn(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 if i % args.logging_steps == 0: _snake_case = jax_utils.unreplicate(state.step ) _snake_case = running_loss.item() / i _snake_case = self.scheduler_fn(state_step - 1 ) _snake_case = self.evaluate(_lowerCamelCase , _lowerCamelCase ) _snake_case = { '''step''': state_step.item(), '''eval_loss''': eval_loss.item(), '''tr_loss''': tr_loss, '''lr''': lr.item(), } tqdm.write(str(_lowerCamelCase ) ) self.logger.log(_lowerCamelCase , commit=_lowerCamelCase ) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f'''-e{epoch}-s{i}''' , state=_lowerCamelCase ) def lowercase ( self : Optional[Any] , _lowerCamelCase : Dict , _lowerCamelCase : Union[str, Any] ): _snake_case = get_batched_dataset(_lowerCamelCase , self.args.batch_size ) _snake_case = len(_lowerCamelCase ) // self.args.batch_size _snake_case = jnp.array(0 , dtype=jnp.floataa ) _snake_case = 0 for batch in tqdm(_lowerCamelCase , total=_lowerCamelCase , desc='''Evaluating ... ''' ): _snake_case = self.data_collator(_lowerCamelCase ) _snake_case = self.val_step_fn(_lowerCamelCase , **_lowerCamelCase ) running_loss += jax_utils.unreplicate(metrics['''loss'''] ) i += 1 return running_loss / i def lowercase ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : Dict ): _snake_case = jax_utils.unreplicate(_lowerCamelCase ) print(f'''SAVING CHECKPOINT IN {save_dir}''' , end=''' ... ''' ) self.model_save_fn(_lowerCamelCase , params=state.params ) with open(os.path.join(_lowerCamelCase , '''opt_state.msgpack''' ) , '''wb''' ) as f: f.write(to_bytes(state.opt_state ) ) joblib.dump(self.args , os.path.join(_lowerCamelCase , '''args.joblib''' ) ) joblib.dump(self.data_collator , os.path.join(_lowerCamelCase , '''data_collator.joblib''' ) ) with open(os.path.join(_lowerCamelCase , '''training_state.json''' ) , '''w''' ) as f: json.dump({'''step''': state.step.item()} , _lowerCamelCase ) print('''DONE''' ) def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] ) -> Tuple: print(f'''RESTORING CHECKPOINT FROM {save_dir}''' , end=''' ... 
''' ) with open(os.path.join(__lowerCamelCase , '''flax_model.msgpack''' ) , '''rb''' ) as f: _snake_case = from_bytes(state.params , f.read() ) with open(os.path.join(__lowerCamelCase , '''opt_state.msgpack''' ) , '''rb''' ) as f: _snake_case = from_bytes(state.opt_state , f.read() ) _snake_case = joblib.load(os.path.join(__lowerCamelCase , '''args.joblib''' ) ) _snake_case = joblib.load(os.path.join(__lowerCamelCase , '''data_collator.joblib''' ) ) with open(os.path.join(__lowerCamelCase , '''training_state.json''' ) , '''r''' ) as f: _snake_case = json.load(__lowerCamelCase ) _snake_case = training_state['''step'''] print('''DONE''' ) return params, opt_state, step, args, data_collator def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> List[Any]: _snake_case = num_train_steps - warmup_steps _snake_case = optax.linear_schedule(init_value=__lowerCamelCase , end_value=__lowerCamelCase , transition_steps=__lowerCamelCase ) _snake_case = optax.linear_schedule(init_value=__lowerCamelCase , end_value=1E-7 , transition_steps=__lowerCamelCase ) _snake_case = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] ) return lr def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : Optional[int] ) -> List[str]: def weight_decay_mask(__lowerCamelCase : List[Any] ): _snake_case = traverse_util.flatten_dict(__lowerCamelCase ) _snake_case = {k: (v[-1] != '''bias''' and v[-2:] != ('''LayerNorm''', '''scale''')) for k, v in params.items()} return traverse_util.unflatten_dict(__lowerCamelCase ) _snake_case = scheduler_fn(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _snake_case = optax.adamw(learning_rate=__lowerCamelCase , weight_decay=__lowerCamelCase , mask=__lowerCamelCase ) return tx, lr
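The optimizer schedule assembled in the scheduler helper above (linear warmup into linear decay toward 1e-7) can be exercised in isolation. A sketch assuming only optax, with the 100/1000 step counts as placeholder values:

import optax

def make_lr_schedule(lr, init_lr, warmup_steps, num_train_steps):
    # Linear warmup from init_lr to lr, then linear decay down to 1e-7.
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    return optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])

schedule = make_lr_schedule(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
print(float(schedule(0)), float(schedule(100)), float(schedule(999)))  # 0.0 -> 3e-5 -> ~1e-7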
40
1
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _lowercase ( lowerCAmelCase, unittest.TestCase ): """simple docstring""" __A = VideoToVideoSDPipeline __A = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} __A = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} __A = PipelineTesterMixin.required_optional_params - {"latents"} __A = False # No `output_type`. __A = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def UpperCamelCase_ (self ): """simple docstring""" torch.manual_seed(0 ) a = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) a = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) a = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) a = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , ) a = CLIPTextModel(lowerCamelCase_ ) a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) a = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=0 ): """simple docstring""" a = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ ) if str(lowerCamelCase_ ).startswith("mps" ): a = torch.manual_seed(lowerCamelCase_ ) else: a = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) a = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def UpperCamelCase_ (self ): """simple docstring""" a = "cpu" # ensure determinism for the device-dependent torch.Generator a = self.get_dummy_components() a = VideoToVideoSDPipeline(**lowerCamelCase_ ) a = sd_pipe.to(lowerCamelCase_ ) sd_pipe.set_progress_bar_config(disable=lowerCamelCase_ ) a = self.get_dummy_inputs(lowerCamelCase_ ) a = "np" a = sd_pipe(**lowerCamelCase_ ).frames a = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) a = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert 
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def UpperCamelCase_ (self ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase_ , expected_max_diff=5E-3 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def UpperCamelCase_ (self ): """simple docstring""" pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def UpperCamelCase_ (self ): """simple docstring""" pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def UpperCamelCase_ (self ): """simple docstring""" pass def UpperCamelCase_ (self ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class _lowercase ( unittest.TestCase ): """simple docstring""" def UpperCamelCase_ (self ): """simple docstring""" a = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames a = torch.Generator(device="cpu" ).manual_seed(0 ) a = torch.randn((1, 10, 3, 1024, 576) , generator=lowerCamelCase_ ) a = video.to("cuda" ) a = "Spiderman is surfing" a = pipe(lowerCamelCase_ , video=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=3 , output_type="pt" ).frames a = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
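The get_dummy_inputs helper above hinges on one seeding detail: MPS has lacked device-local generators, so tests fall back to the global seed there, while other devices get a local Generator that leaves global RNG state untouched. A minimal sketch of that pattern, assuming only torch:

import torch

def seeded_generator(device, seed=0):
    # MPS fallback: torch.manual_seed returns the default (global) generator.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    # Elsewhere: a device-local generator keeps the test deterministic in isolation.
    return torch.Generator(device=device).manual_seed(seed)

g1 = seeded_generator("cpu", seed=0)
g2 = seeded_generator("cpu", seed=0)
assert torch.equal(torch.randn(2, 2, generator=g1), torch.randn(2, 2, generator=g2))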
227
import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _lowercase: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class _lowercase ( lowerCAmelCase, unittest.TestCase ): """simple docstring""" __A = XLMProphetNetTokenizer __A = False __A = True def UpperCamelCase_ (self ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing a = XLMProphetNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ (self ): """simple docstring""" a = "[PAD]" a = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "[PAD]" ) self.assertEqual(vocab_keys[1] , "[CLS]" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(lowerCamelCase_ ) , 1012 ) def UpperCamelCase_ (self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1012 ) def UpperCamelCase_ (self ): """simple docstring""" a = XLMProphetNetTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) a = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) a = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) a = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ] , ) a = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ] , ) @cached_property def UpperCamelCase_ (self ): """simple docstring""" return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" ) @slow def UpperCamelCase_ (self ): """simple docstring""" a = "Hello World!" 
a = [35389, 6672, 49, 2] self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) ) @slow def UpperCamelCase_ (self ): """simple docstring""" a = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
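The id arithmetic checked above (raw SentencePiece ids shifted by tokenizer.fairseq_offset so the low ids stay reserved for special tokens) in miniature; the toy vocabulary values come from the test itself, but the offset of 1 and the helper name are assumptions:

FAIRSEQ_OFFSET = 1  # hypothetical offset; real tokenizers reserve the low ids for specials
sp_ids = {"▁This": 285, "▁is": 46, "▁a": 10}  # toy vocab, values taken from the test above

def to_model_ids(tokens):
    return [sp_ids[t] + FAIRSEQ_OFFSET for t in tokens]

assert to_model_ids(["▁This", "▁is", "▁a"]) == [286, 47, 11]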
227
1
import argparse

import torch

from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    '''simple docstring'''
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
    )
    parser.add_argument(
        """--albert_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained ALBERT model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
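For illustration, a hedged invocation sketch of the converter above; every path below is a placeholder, not a file shipped with this repository:

# Hypothetical invocation; all three paths are placeholders.
convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="albert_base/model.ckpt-best",
    albert_config_file="albert_base/albert_config.json",
    pytorch_dump_path="albert_base/pytorch_model.bin",
)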
192
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 _UpperCAmelCase = { """return_dict""": False, """output_hidden_states""": True, """output_attentions""": True, """torchscript""": True, """torch_dtype""": """float16""", """use_bfloat16""": True, """tf_legacy_loss""": True, """pruned_heads""": {"""a""": 1}, """tie_word_embeddings""": False, """is_decoder""": True, """cross_attention_hidden_size""": 128, """add_cross_attention""": True, """tie_encoder_decoder""": True, """max_length""": 50, """min_length""": 3, """do_sample""": True, """early_stopping""": True, """num_beams""": 3, """num_beam_groups""": 3, """diversity_penalty""": 0.5, """temperature""": 2.0, """top_k""": 10, """top_p""": 0.7, """typical_p""": 0.2, """repetition_penalty""": 0.8, """length_penalty""": 0.8, """no_repeat_ngram_size""": 5, """encoder_no_repeat_ngram_size""": 5, """bad_words_ids""": [1, 2, 3], """num_return_sequences""": 3, """chunk_size_feed_forward""": 5, """output_scores""": True, """return_dict_in_generate""": True, """forced_bos_token_id""": 2, """forced_eos_token_id""": 3, """remove_invalid_values""": True, """architectures""": ["""BertModel"""], """finetuning_task""": """translation""", """id2label""": {0: """label"""}, """label2id""": {"""label""": """0"""}, """tokenizer_class""": """BertTokenizerFast""", """prefix""": """prefix""", """bos_token_id""": 6, """pad_token_id""": 7, """eos_token_id""": 8, """sep_token_id""": 9, """decoder_start_token_id""": 10, """exponential_decay_length_penalty""": (5, 1.01), """suppress_tokens""": [0, 1], """begin_suppress_tokens""": 2, """task_specific_params""": {"""translation""": """some_params"""}, """problem_type""": """regression""", } @is_staging_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def lowerCAmelCase_ ( cls ): """simple docstring""" A_ : Optional[int] = TOKEN HfFolder.save_token(lowercase ) @classmethod def lowerCAmelCase_ ( cls ): """simple docstring""" try: delete_repo(token=cls._token , repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-config' ) except HTTPError: pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('test-config' , use_auth_token=self._token ) A_ : Dict = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowercase , repo_id='test-config' , push_to_hub=lowercase , use_auth_token=self._token ) A_ : Dict = BertConfig.from_pretrained(F'''{USER}/test-config''' ) for k, v in config.to_dict().items(): 
if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) config.push_to_hub('valid_org/test-config-org' , use_auth_token=self._token ) A_ : List[Any] = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowercase , repo_id='valid_org/test-config-org' , push_to_hub=lowercase , use_auth_token=self._token ) A_ : Optional[Any] = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowercase , getattr(lowercase , lowercase ) ) def lowerCAmelCase_ ( self ): """simple docstring""" CustomConfig.register_for_auto_class() A_ : Optional[int] = CustomConfig(attribute=4_2 ) config.push_to_hub('test-dynamic-config' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'AutoConfig': 'custom_configuration.CustomConfig'} ) A_ : Optional[int] = AutoConfig.from_pretrained(F'''{USER}/test-dynamic-config''' , trust_remote_code=lowercase ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , 'CustomConfig' ) self.assertEqual(new_config.attribute , 4_2 ) class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated A_ : Optional[int] = c.n_embd + 1 # int A_ : List[str] = c.resid_pdrop + 1.0 # float A_ : str = not c.scale_attn_weights # bool A_ : Optional[int] = c.summary_type + 'foo' # str c.update_from_string( F'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowercase , c.n_embd , 'mismatch for key: n_embd' ) self.assertEqual(lowercase , c.resid_pdrop , 'mismatch for key: resid_pdrop' ) self.assertEqual(lowercase , c.scale_attn_weights , 'mismatch for key: scale_attn_weights' ) self.assertEqual(lowercase , c.summary_type , 'mismatch for key: summary_type' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = PretrainedConfig() A_ : int = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowercase , ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) A_ : List[str] = [key for key, value in config_common_kwargs.items() if value == getattr(lowercase , lowercase )] if len(lowercase ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' F''' {', '.join(lowercase )}.''' ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaises(lowercase ): # config is in subfolder, the following should not work without specifying the subfolder A_ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) A_ : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' , subfolder='bert' ) self.assertIsNotNone(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = mock.Mock() A_ : int = 5_0_0 A_ : Union[str, Any] = {} A_ : List[str] = HTTPError A_ : List[Any] = {} # Download this model to make sure it's in the cache. A_ : Tuple = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=lowercase ) as mock_head: A_ : Optional[int] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = AutoConfig.from_pretrained('bert-base-cased' ) A_ : Tuple = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowercase ) A_ : Dict = 2 json.dump(configuration.to_dict() , open(os.path.join(lowercase , 'config.4.0.0.json' ) , 'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 A_ : Tuple = AutoConfig.from_pretrained(lowercase ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 A_ : int = ['config.42.0.0.json'] A_ : str = 7_6_8 configuration.save_pretrained(lowercase ) shutil.move(os.path.join(lowercase , 'config.4.0.0.json' ) , os.path.join(lowercase , 'config.42.0.0.json' ) ) A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertEqual(new_configuration.hidden_size , 7_6_8 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = 'hf-internal-testing/test-two-configs' import transformers as new_transformers A_ : List[Any] = 'v4.0.0' A_ , A_ : List[str] = new_transformers.models.auto.AutoConfig.from_pretrained( lowercase , return_unused_kwargs=lowercase ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowercase , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers A_ : Optional[int] = 'v3.0.0' A_ : List[Any] = old_transformers.models.auto.AutoConfig.from_pretrained(lowercase ) self.assertEqual(old_configuration.hidden_size , 7_6_8 )
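update_from_string, exercised in the test above, reduces to parsing comma-separated k=v pairs and casting each value to the attribute's current type. A simplified reimplementation sketch, not the library's actual method; the bool branch deliberately precedes the int branch because bool is an int subclass:

def update_from_string(obj, update_str):
    # Simplified sketch: supports bool, int, float and str attributes only.
    for pair in update_str.split(","):
        key, value = pair.split("=", 1)
        current = getattr(obj, key)
        if isinstance(current, bool):
            setattr(obj, key, value.lower() in ("true", "1", "yes"))
        elif isinstance(current, int):
            setattr(obj, key, int(value))
        elif isinstance(current, float):
            setattr(obj, key, float(value))
        else:
            setattr(obj, key, value)

class Cfg:
    n_embd = 768
    resid_pdrop = 0.1
    scale_attn_weights = True
    summary_type = "cls_index"

c = Cfg()
update_from_string(c, "n_embd=769,resid_pdrop=1.1,scale_attn_weights=False,summary_type=cls_indexfoo")
assert (c.n_embd, c.resid_pdrop, c.scale_attn_weights, c.summary_type) == (769, 1.1, False, "cls_indexfoo")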
192
1
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
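A short usage sketch for the restored predicate; the name is_palindrome is an assumption, since the original identifier was mangled away:

# Usage sketch for the function above.
assert is_palindrome(121) is True
assert is_palindrome(-121) is False  # negatives are rejected up front
assert is_palindrome(10) is False    # the trailing zero breaks the reversal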
124
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class __lowercase (UpperCamelCase__ , unittest.TestCase ): """simple docstring""" _snake_case = XLMRobertaTokenizer _snake_case = XLMRobertaTokenizerFast _snake_case = True _snake_case = True def UpperCAmelCase ( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing snake_case : Tuple = XLMRobertaTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase ( self ) -> int: snake_case : str = """<pad>""" snake_case : Tuple = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def UpperCAmelCase ( self ) -> int: snake_case : List[str] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(A ) , 1_0_0_2 ) def UpperCAmelCase ( self ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 ) def UpperCAmelCase ( self ) -> Optional[int]: snake_case : Any = XLMRobertaTokenizer(A , keep_accents=A ) snake_case : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) snake_case : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) snake_case : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) snake_case : Any = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def UpperCAmelCase ( self ) -> Optional[Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return snake_case : List[Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} 
({pretrained_name})""" ): snake_case : Dict = self.rust_tokenizer_class.from_pretrained(A , **A ) snake_case : int = self.tokenizer_class.from_pretrained(A , **A ) snake_case : Optional[int] = tempfile.mkdtemp() snake_case : List[Any] = tokenizer_r.save_pretrained(A ) snake_case : Optional[Any] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) snake_case : Union[str, Any] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way snake_case : str = tokenizer_r.from_pretrained(A ) snake_case : Dict = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True snake_case : List[Any] = tempfile.mkdtemp() snake_case : Tuple = tokenizer_r.save_pretrained(A , legacy_format=A ) snake_case : Any = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way snake_case : int = tokenizer_r.from_pretrained(A ) snake_case : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False snake_case : Tuple = tempfile.mkdtemp() snake_case : str = tokenizer_r.save_pretrained(A , legacy_format=A ) snake_case : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case : Dict = tokenizer_r.from_pretrained(A ) snake_case : List[Any] = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @cached_property def UpperCAmelCase ( self ) -> str: return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" ) def UpperCAmelCase ( self ) -> Optional[int]: with tempfile.NamedTemporaryFile() as f: shutil.copyfile(A , f.name ) snake_case : Any = XLMRobertaTokenizer(f.name , keep_accents=A ) snake_case : List[Any] = pickle.dumps(A ) pickle.loads(A ) def UpperCAmelCase ( self ) -> Any: if not self.test_rust_tokenizer: return snake_case : str = self.get_tokenizer() snake_case : List[str] = self.get_rust_tokenizer() snake_case : str = """I was born in 92000, and this is falsé.""" snake_case : Optional[int] = tokenizer.tokenize(A ) snake_case : List[Any] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) snake_case : List[Any] = tokenizer.encode(A , add_special_tokens=A ) snake_case : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) snake_case : Optional[Any] = self.get_rust_tokenizer() snake_case : str = tokenizer.encode(A ) snake_case : Dict = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) @slow def UpperCAmelCase ( self ) -> List[Any]: snake_case : Dict = """Hello World!""" snake_case : Union[str, Any] = [0, 3_5_3_7_8, 
6_6_6_1, 3_8, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(A , self.big_tokenizer.encode(A ) ) @slow def UpperCAmelCase ( self ) -> str: snake_case : int = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) snake_case : Tuple = [ 0, 3_2_9_3, 8_3, 1_0, 4_5_5_2, 4_9_8_9, 7_9_8_6, 6_7_8, 1_0, 5_9_1_5, 1_1_1, 1_7_9_4_5_9, 1_2_4_8_5_0, 4, 6_0_4_4, 2_3_7, 1_2, 6, 5, 6, 4, 6_7_8_0, 7_0_5, 1_5, 1_3_8_8, 4_4, 3_7_8, 1_0_1_1_4, 7_1_1, 1_5_2, 2_0, 6, 5, 2_2_3_7_6, 6_4_2, 1_2_2_1, 1_5_1_9_0, 3_4_1_5_3, 4_5_0, 5_6_0_8, 9_5_9, 1_1_1_9, 5_7_7_0_2, 1_3_6, 1_8_6, 4_7, 1_0_9_8, 2_9_3_6_7, 4_7, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_0_4_4, 2_3_7, 6_2_8_4, 5_0_9_0_1, 5_2_8, 3_1, 9_0, 3_4, 9_2_7, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(A , self.big_tokenizer.encode(A ) ) @slow def UpperCAmelCase ( self ) -> str: # fmt: off snake_case : Tuple = {"""input_ids""": [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], 
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
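The long attention_mask fixtures above follow one rule: 1 over real tokens and 0 over padding, with the <pad> id 1 that XLM-R uses. A pure-Python sketch of that padding; the helper name and the max_length of 8 are arbitrary choices, while the input ids are the "Hello World!" encoding from the test:

PAD_ID = 1  # XLM-R's <pad>; treated as an assumption here

def pad_batch(batch, max_length):
    input_ids, attention_mask = [], []
    for ids in batch:
        pad_len = max_length - len(ids)
        input_ids.append(ids + [PAD_ID] * pad_len)
        attention_mask.append([1] * len(ids) + [0] * pad_len)
    return input_ids, attention_mask

ids, mask = pad_batch([[0, 35378, 6661, 38, 2]], max_length=8)
assert ids == [[0, 35378, 6661, 38, 2, 1, 1, 1]]
assert mask == [[1, 1, 1, 1, 1, 0, 0, 0]]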
124
1
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg: str, hint: str = None):
    require_version(deps[pkg], hint)
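A minimal sketch of what a runtime version check like the one above does, as a simplified stand-in for `require_version_core` (handles only `>=` pins; the function name `check_min_version` is illustrative, not part of the library):

from importlib.metadata import version

from packaging.version import Version


def check_min_version(requirement: str) -> None:
    # "tqdm>=4.27" -> package "tqdm", minimum version "4.27"
    name, wanted = requirement.split(">=")
    installed = version(name)
    if Version(installed) < Version(wanted):
        raise ImportError(f"{name}>={wanted} is required, but found {name}=={installed}")


check_min_version("tqdm>=4.27")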
371
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_no_framework_provided(self):
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
159
0
import os


def solution(filename: str = "input.txt") -> int:
    """
    Find the minimal path sum from the left column to the right column of a matrix,
    moving only up, down, and right, using column-by-column dynamic programming.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # enter the column from the left
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # relax downwards within the column
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # relax upwards within the column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
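A small worked example of the same three-sweep relaxation (right, then down, then up) on an in-memory matrix rather than input.txt; `minimal_path_sum` and the rolling one-column `best` list are illustrative, not part of the file above:

def minimal_path_sum(matrix: list[list[int]]) -> int:
    # Same column-by-column DP as above, keeping only the current column of sums.
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # column 0: entry cost only
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # move right
        for i in range(1, rows):  # sweep down
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # sweep up
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)


# The 8 -> 1 -> 2 path (two moves right) costs 11, beating every other route.
print(minimal_path_sum([[8, 1, 2], [4, 9, 6], [3, 5, 7]]))  # 11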
182
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import tensorflow as tf

    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM


@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
182
1
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP, and scales/unscales image
    embeddings with them.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
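A quick round-trip check of the scale/unscale pair above (a sketch that assumes the class as defined here is importable; the mean/std values are arbitrary):

import torch

normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
with torch.no_grad():
    normalizer.mean.copy_(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
    normalizer.std.copy_(torch.tensor([[0.5, 0.5, 2.0, 2.0]]))

embeds = torch.randn(2, 4)
# unscale is the exact inverse of scale, up to floating-point error
roundtrip = normalizer.unscale(normalizer.scale(embeds))
print(torch.allclose(roundtrip, embeds, atol=1e-6))  # True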
261
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
261
1
import datasets
from jiwer import compute_measures


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.

The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.

This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.

Word error rate can then be computed as:

WER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).

This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.

Args:
    references: List of references for each speech input.
    predictions: List of transcriptions to score.
    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.

Returns:
    (float): the word error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> wer = datasets.load_metric("wer")
    >>> wer_score = wer.compute(predictions=predictions, references=references)
    >>> print(wer_score)
    0.5
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
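To make the WER = (S + D + I) / N formula concrete, here is a hand-rolled word-level edit distance (a minimal sketch, not the jiwer implementation the metric actually calls; `word_error_rate` is an illustrative name):

def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    # d[i][j] = minimum edits turning the first i reference words into the first j hypothesis words
    d = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        d[i][0] = i  # i deletions
    for j in range(len(hyp) + 1):
        d[0][j] = j  # j insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            substitution = d[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            d[i][j] = min(substitution, d[i - 1][j] + 1, d[i][j - 1] + 1)
    return d[-1][-1] / len(ref)  # (S + D + I) / N


# One substitution out of four reference words:
print(word_error_rate("this is the reference", "this is the prediction"))  # 0.25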
92
class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """
    A fixed-priority queue with three priorities (0 is highest).
    Elements dequeue in priority order, FIFO within each priority.
    """

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """
    The element itself is the priority: the smallest element dequeues first.
    """

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self) -> str:
        return str(self.queue)


def fixed_priority_queue():
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    # Nine elements were enqueued, so this tenth dequeue raises UnderFlowError.
    print(fpq.dequeue())


def element_priority_queue():
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    # Nine elements were enqueued, so this tenth dequeue raises UnderFlowError.
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
92
1
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n, found by repeated trial division."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip out every power of that divisor
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"{solution() = }")
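A quick sanity check of the function above, using the classic Project Euler example value (assumes `solution` as defined here is in scope):

# 13195 = 5 * 7 * 13 * 29, so its largest prime factor is 29
assert solution(13195) == 29
assert solution(17) == 17  # a prime is its own largest prime factor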
354
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """Count the number of set bits by repeatedly clearing the lowest set bit."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """Count the number of set bits by checking each bit with the modulo operator."""
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two counting functions over several input values."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
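Why `number &= number - 1` works: subtracting 1 flips the lowest set bit and every bit below it, so the AND clears exactly that bit. A short standalone trace:

n = 0b10110
while n:
    print(f"{n:05b} & {n - 1:05b} -> {n & (n - 1):05b}")
    n &= n - 1
# 10110 & 10101 -> 10100
# 10100 & 10011 -> 10000
# 10000 & 01111 -> 00000
# Three iterations: 0b10110 has three set bits.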
7
0
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
173
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Find the sum of n terms in an arithmetic progression."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
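A quick check of the closed form (n/2)(2a + (n-1)d) against a brute-force sum, using the same 1, 1, 10 arguments as above:

first_term, common_diff, num_of_terms = 1, 1, 10
brute_force = sum(first_term + k * common_diff for k in range(num_of_terms))
assert sum_of_series(first_term, common_diff, num_of_terms) == brute_force == 55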
21
0
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # each direction aims at the other frontier's current node
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
370
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    """
    Helper function parsing the command line options.
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
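A sketch of how the launcher above is meant to be invoked (the training script name "train_mnist.py" and its arguments are hypothetical; the script must define an `_mp_fn(index)` entry point for xmp.spawn):

import subprocess
import sys

# Spawn the training script on 8 TPU cores; all trailing args are forwarded to it,
# and the launcher appends "--tpu_num_cores 8" to the patched sys.argv.
subprocess.run(
    [sys.executable, "xla_spawn.py", "--num_cores", "8", "train_mnist.py", "--batch_size", "64"],
    check=True,
)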
36
0