Dataset schema (5 columns per row):

  column                   type    min  max
  code                     string  81   54k    (string length)
  code_codestyle           int64   0    721
  style_context            string  91   41.9k  (string length)
  style_context_codestyle  int64   0    699
  label                    int64   0    1
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
code_codestyle: 715
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
style_context_codestyle: 70
label: 0
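For reference, a short usage sketch of the environment helpers in the code field above. The restored names (get_int_from_env, parse_flag_from_env) are a reconstruction based on the Accelerate utilities this snippet appears to be drawn from:

import os

# Hypothetical environment for the demo.
os.environ["WORLD_SIZE"] = "4"
os.environ["ACCELERATE_DEBUG_MODE"] = "yes"

# Returns the first non-negative integer found among the listed env vars.
world_size = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], 1)
assert world_size == 4

# "yes"/"true"/"t"/"1" style flags parse as True via strtobool.
assert parse_flag_from_env("ACCELERATE_DEBUG_MODE", default=False) is True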
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) _UpperCAmelCase = { """sample_size""": 32, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1000, """block_out_channels""": [32, 64], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } _UpperCAmelCase = { """sample_size""": 64, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1000, """block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } _UpperCAmelCase = { """sample_size""": 256, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } _UpperCAmelCase = { """num_train_timesteps""": 40, """sigma_min""": 0.002, """sigma_max""": 80.0, } _UpperCAmelCase = { """num_train_timesteps""": 201, """sigma_min""": 0.002, """sigma_max""": 80.0, } _UpperCAmelCase = { """num_train_timesteps""": 151, """sigma_min""": 0.002, """sigma_max""": 80.0, } def UpperCamelCase ( __lowercase : Tuple ): '''simple docstring''' if isinstance(snake_case__ ,snake_case__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def UpperCamelCase ( __lowercase : Any ,__lowercase : Dict ,__lowercase : List[Any] ,__lowercase : Optional[Any] ,__lowercase : Union[str, Any]=False ): '''simple docstring''' A_ : List[Any] = checkpoint[f'''{old_prefix}.in_layers.0.weight'''] A_ : str = checkpoint[f'''{old_prefix}.in_layers.0.bias'''] A_ : Optional[int] = checkpoint[f'''{old_prefix}.in_layers.2.weight'''] A_ : Optional[int] = checkpoint[f'''{old_prefix}.in_layers.2.bias'''] A_ : Any = checkpoint[f'''{old_prefix}.emb_layers.1.weight'''] A_ : int = checkpoint[f'''{old_prefix}.emb_layers.1.bias'''] A_ : Optional[int] = checkpoint[f'''{old_prefix}.out_layers.0.weight'''] A_ : Tuple = checkpoint[f'''{old_prefix}.out_layers.0.bias'''] A_ : str = checkpoint[f'''{old_prefix}.out_layers.3.weight'''] A_ : Optional[int] = checkpoint[f'''{old_prefix}.out_layers.3.bias'''] if has_skip: A_ : List[Any] = checkpoint[f'''{old_prefix}.skip_connection.weight'''] A_ : List[Any] = 
checkpoint[f'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Tuple ,__lowercase : List[Any] ,__lowercase : List[Any] ,__lowercase : Optional[Any]=None ): '''simple docstring''' A_ , A_ , A_ : Optional[int] = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 ,dim=0 ) A_ , A_ , A_ : int = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 ,dim=0 ) A_ : Union[str, Any] = checkpoint[f'''{old_prefix}.norm.weight'''] A_ : Tuple = checkpoint[f'''{old_prefix}.norm.bias'''] A_ : Optional[Any] = weight_q.squeeze(-1 ).squeeze(-1 ) A_ : Tuple = bias_q.squeeze(-1 ).squeeze(-1 ) A_ : Optional[Any] = weight_k.squeeze(-1 ).squeeze(-1 ) A_ : Union[str, Any] = bias_k.squeeze(-1 ).squeeze(-1 ) A_ : int = weight_v.squeeze(-1 ).squeeze(-1 ) A_ : Any = bias_v.squeeze(-1 ).squeeze(-1 ) A_ : List[Any] = ( checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) A_ : str = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def UpperCamelCase ( __lowercase : str ,__lowercase : Optional[int] ): '''simple docstring''' A_ : int = torch.load(snake_case__ ,map_location='cpu' ) A_ : Any = {} A_ : Optional[int] = checkpoint['time_embed.0.weight'] A_ : Optional[Any] = checkpoint['time_embed.0.bias'] A_ : Tuple = checkpoint['time_embed.2.weight'] A_ : List[Any] = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: A_ : Optional[int] = checkpoint['label_emb.weight'] A_ : int = checkpoint['input_blocks.0.0.weight'] A_ : Optional[int] = checkpoint['input_blocks.0.0.bias'] A_ : Dict = unet_config['down_block_types'] A_ : List[Any] = unet_config['layers_per_block'] A_ : Optional[int] = unet_config['attention_head_dim'] A_ : Tuple = unet_config['block_out_channels'] A_ : Union[str, Any] = 1 A_ : List[str] = channels_list[0] for i, layer_type in enumerate(snake_case__ ): A_ : Optional[Any] = channels_list[i] A_ : List[str] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(snake_case__ ): A_ : Optional[int] = f'''down_blocks.{i}.resnets.{j}''' A_ : Optional[int] = f'''input_blocks.{current_layer}.0''' A_ : List[str] = True if j == 0 and downsample_block_has_skip else False A_ : int = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(snake_case__ ): A_ : Tuple = f'''down_blocks.{i}.resnets.{j}''' A_ : List[Any] = f'''input_blocks.{current_layer}.0''' A_ : int = True if j == 0 and downsample_block_has_skip else False A_ : List[str] = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ ) A_ : Optional[Any] = f'''down_blocks.{i}.attentions.{j}''' A_ : Optional[int] = f'''input_blocks.{current_layer}.1''' A_ : List[Any] = convert_attention( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) current_layer += 1 if i != len(snake_case__ ) - 1: A_ : Optional[Any] = f'''down_blocks.{i}.downsamplers.0''' A_ : Optional[int] = f'''input_blocks.{current_layer}.0''' A_ : Union[str, Any] = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) current_layer += 1 A_ : int = current_channels # hardcoded the mid-block for now A_ : Optional[Any] = 'mid_block.resnets.0' A_ : Any = 'middle_block.0' A_ : List[str] = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) A_ : Dict = 'mid_block.attentions.0' A_ : Tuple = 'middle_block.1' A_ : 
Dict = convert_attention(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) A_ : List[Any] = 'mid_block.resnets.1' A_ : str = 'middle_block.2' A_ : str = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) A_ : int = 0 A_ : List[str] = unet_config['up_block_types'] for i, layer_type in enumerate(snake_case__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): A_ : Union[str, Any] = f'''up_blocks.{i}.resnets.{j}''' A_ : Optional[Any] = f'''output_blocks.{current_layer}.0''' A_ : Dict = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ ) current_layer += 1 if i != len(snake_case__ ) - 1: A_ : Optional[Any] = f'''up_blocks.{i}.upsamplers.0''' A_ : int = f'''output_blocks.{current_layer-1}.1''' A_ : Dict = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): A_ : str = f'''up_blocks.{i}.resnets.{j}''' A_ : Dict = f'''output_blocks.{current_layer}.0''' A_ : Any = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,has_skip=snake_case__ ) A_ : int = f'''up_blocks.{i}.attentions.{j}''' A_ : Optional[int] = f'''output_blocks.{current_layer}.1''' A_ : Any = convert_attention( snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) current_layer += 1 if i != len(snake_case__ ) - 1: A_ : List[Any] = f'''up_blocks.{i}.upsamplers.0''' A_ : int = f'''output_blocks.{current_layer-1}.2''' A_ : int = convert_resnet(snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ) A_ : Tuple = checkpoint['out.0.weight'] A_ : List[Any] = checkpoint['out.0.bias'] A_ : List[str] = checkpoint['out.2.weight'] A_ : Any = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = strabool(args.class_cond) _UpperCAmelCase = os.path.basename(args.unet_path) print(F"""Checkpoint: {ckpt_name}""") # Get U-Net config if "imagenet64" in ckpt_name: _UpperCAmelCase = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _UpperCAmelCase = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: _UpperCAmelCase = TEST_UNET_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") if not args.class_cond: _UpperCAmelCase = None _UpperCAmelCase = con_pt_to_diffuser(args.unet_path, unet_config) _UpperCAmelCase = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: _UpperCAmelCase = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: _UpperCAmelCase = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): _UpperCAmelCase = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""") _UpperCAmelCase = CMStochasticIterativeScheduler(**scheduler_config) _UpperCAmelCase = 
ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
code_codestyle: 716
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
style_context_codestyle: 70
label: 0
from .testing import (
    are_the_same_tensors,
    execute_subprocess_async,
    require_bnb,
    require_cpu,
    require_cuda,
    require_huggingface_suite,
    require_mps,
    require_multi_gpu,
    require_multi_xpu,
    require_safetensors,
    require_single_gpu,
    require_single_xpu,
    require_torch_min_version,
    require_tpu,
    require_xpu,
    skip,
    slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU

from .scripts import test_script, test_sync, test_ops  # isort: skip
code_codestyle: 717
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
style_context_codestyle: 70
label: 0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 718
import random


def rabin_miller(num: int) -> bool:
    # Miller-Rabin probabilistic primality test with 5 random witnesses.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1

    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67,
        71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139,
        149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223,
        227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293,
        307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383,
        389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
        571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647,
        653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743,
        751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,
        853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
style_context_codestyle: 70
label: 0
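A quick usage sketch for the primality helpers in the style_context field above, using the reconstructed names rabin_miller, is_prime_low_num, and generate_large_prime:

# 97 sits in the hard-coded low-primes table; 221 = 13 * 17 fails trial division.
assert is_prime_low_num(97)
assert not is_prime_low_num(221)

# Probabilistic test: 5 Miller-Rabin rounds, so a composite slips through
# with probability at most about (1/4) ** 5.
assert rabin_miller(2_147_483_647)  # 2**31 - 1, a Mersenne prime

# A small keysize keeps the demo fast; real keys use the 1024-bit default.
num = generate_large_prime(keysize=128)
assert num.bit_length() == 128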
from copy import deepcopy


class FenwickTree:
    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr):
        # Build the tree in O(n) from an existing array.
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        # Invert init: recover the underlying array in O(n).
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        # Point update: arr[index] += value.
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        # Point assignment: arr[index] = value.
        self.add(index, value - self.get(index))

    def prefix(self, right):
        # Sum of arr[0:right] (exclusive of `right`).
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        # Largest index whose inclusive prefix sum does not exceed `value`.
        value -= self.tree[0]
        if value < 0:
            return -1

        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2

        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 719
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
style_context_codestyle: 70
label: 0
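The Fenwick tree above supports point updates and prefix-sum queries in O(log n); a minimal usage sketch, assuming the class is named FenwickTree as in the rewrite:

f = FenwickTree(arr=[1, 2, 3, 4, 5])

assert f.prefix(3) == 6        # sum of arr[0:3]
assert f.query(1, 4) == 9      # sum of arr[1:4]

f.add(2, 10)                   # arr[2] += 10
assert f.get(2) == 13
assert f.get_array() == [1, 2, 13, 4, 5]

# rank_query(v): largest index whose inclusive prefix sum is still <= v.
assert f.rank_query(16) == 2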
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
code_codestyle: 720
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
style_context_codestyle: 70
label: 0
def hamming(n_element: int) -> list:
    # Returns the first n_element Hamming numbers (numbers of the form 2^i * 3^j * 5^k).
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError("a should be a positive number")
        raise my_error

    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
code_codestyle: 721
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        # Row and column of `letter` in the 5x5 square, 1-indexed.
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # the square has no 'j'

        # Write (row, column) pairs column-wise, then read them back row-wise.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            encoded_message = encoded_message + self.numbers_to_letter(index1, index2)

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Invert the transposition: write pairs row-wise, read them column-wise.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            decoded_message = decoded_message + self.numbers_to_letter(index1, index2)

        return decoded_message
style_context_codestyle: 70
label: 0
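A round-trip sketch for the Polybius square cipher in the style_context field above (class name PolybiusCipher as in the rewrite). The encoder folds 'j' into 'i' and drops spaces, so decoding recovers the normalized plaintext rather than the literal input:

cipher = PolybiusCipher()

encoded = cipher.encode("test message")
assert cipher.decode(encoded) == "testmessage"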
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = tempfile.mkdtemp() # fmt: off A_ : List[Any] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on A_ : Optional[int] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) A_ : List[str] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] A_ : Any = {'unk_token': '<unk>'} A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_lowerCamelCase ) ) A_ : List[str] = { 'do_resize': True, 'size': 2_0, 'do_center_crop': True, 'crop_size': 1_8, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } A_ : Optional[Any] = os.path.join(self.tmpdirname , _lowerCamelCase ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(_lowerCamelCase , _lowerCamelCase ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowerCAmelCase_ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] A_ : str = [Image.fromarray(np.moveaxis(_lowerCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.get_tokenizer() A_ : Optional[int] = self.get_rust_tokenizer() A_ : Union[str, Any] = self.get_image_processor() A_ : Tuple = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) A_ : int = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCamelCase ) A_ : int = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) A_ : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer 
, _lowerCamelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCamelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCamelCase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A_ : Union[str, Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) A_ : Any = self.get_image_processor(do_normalize=_lowerCamelCase , padding_value=1.0 ) A_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_lowerCamelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCamelCase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.get_image_processor() A_ : int = self.get_tokenizer() A_ : Tuple = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Optional[int] = self.prepare_image_inputs() A_ : Tuple = image_processor(_lowerCamelCase , return_tensors='np' ) A_ : List[str] = processor(images=_lowerCamelCase , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = self.get_image_processor() A_ : List[Any] = self.get_tokenizer() A_ : Dict = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = 'lower newer' A_ : Any = processor(text=_lowerCamelCase ) A_ : Union[str, Any] = tokenizer(_lowerCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : List[Any] = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : Optional[Any] = 'lower newer' A_ : Dict = self.prepare_image_inputs() A_ : Optional[Any] = processor(text=_lowerCamelCase , images=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : Any = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = self.prepare_image_inputs() A_ : List[str] = self.prepare_image_inputs() A_ : int = processor(images=_lowerCamelCase , visual_prompt=_lowerCamelCase ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(_lowerCamelCase ): processor() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.get_image_processor() A_ : Union[str, Any] = 
self.get_tokenizer() A_ : Tuple = CLIPSegProcessor(tokenizer=_lowerCamelCase , image_processor=_lowerCamelCase ) A_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : Any = processor.batch_decode(_lowerCamelCase ) A_ : str = tokenizer.batch_decode(_lowerCamelCase ) self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
code_codestyle: 700
from math import sqrt


def solution(limit: int = 1_000_000) -> int:
    # Smallest M such that more than `limit` cuboids with sides up to M
    # have an integer shortest path between opposite corners.
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
style_context_codestyle: 70
label: 0
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    # True if n uses each of the digits 1-9 exactly once.
    s = str(n)
    return len(s) == 9 and set(s) == set("123456789")


def solution() -> int | None:
    # Multiplying a 4-digit base by 100002 concatenates n and 2n;
    # multiplying a 3-digit base by 1002003 concatenates n, 2n and 3n.
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate

    return None


if __name__ == "__main__":
    print(f"{solution() = }")
code_codestyle: 701
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ): """simple docstring""" super().__init__() A_ : Tuple = initial_learning_rate A_ : List[str] = warmup_steps A_ : int = power A_ : Dict = decay_schedule_fn A_ : Any = name def __call__( self , lowercase ): """simple docstring""" with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. A_ : Optional[int] = tf.cast(lowercase , tf.floataa ) A_ : int = tf.cast(self.warmup_steps , tf.floataa ) A_ : Optional[int] = global_step_float / warmup_steps_float A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,): '''simple docstring''' A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,) if num_warmup_steps: A_ : Tuple = WarmUp( initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,) if weight_decay_rate > 0.0: A_ : Union[str, Any] = AdamWeightDecay( learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,) else: A_ : Dict = tf.keras.optimizers.Adam( learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ): """simple docstring""" super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase ) A_ : Dict = weight_decay_rate A_ : Union[str, Any] = include_in_weight_decay A_ : str = exclude_from_weight_decay @classmethod def lowerCAmelCase_ ( cls , lowercase ): """simple docstring""" A_ : Tuple = {'WarmUp': WarmUp} return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase ) A_ : Optional[Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Dict = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ , A_ : Optional[int] = list(zip(*lowercase ) ) return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} A_ : List[str] = apply_state or {} A_ : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: A_ : Dict = self._fallback_apply_state(lowercase , lowercase ) A_ : int = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase , lowercase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase , lowercase ) is not None: return False return True class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self ): """simple docstring""" A_ : int = [] A_ : Optional[int] = None @property def lowerCAmelCase_ ( self ): """simple 
docstring""" if self._accum_steps is None: A_ : int = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , lowercase ): """simple docstring""" if not self._gradients: A_ : Optional[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' ) for accum_gradient, gradient in zip(self._gradients , lowercase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase ) self._accum_steps.assign_add(1 ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase ) )
style_context_codestyle: 70
label: 0
import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _UpperCAmelCase = float("""nan""") class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase ): """simple docstring""" A_ : Dict = sys.stdout A_ : Any = open(_lowercase , 'a' ) def __getattr__( self , lowercase ): """simple docstring""" return getattr(self.stdout , _lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" self.stdout.write(_lowercase ) # strip tqdm codes self.file.write(re.sub(r'^.*\r' , '' , _lowercase , 0 , re.M ) ) def UpperCamelCase ( __lowercase : Optional[int]=80 ,__lowercase : Optional[int]=False ): '''simple docstring''' A_ : Dict = [] # deal with critical env vars A_ : Dict = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A_ : int = os.environ.get(UpperCamelCase__ ,UpperCamelCase__ ) if val is not None: cmd.append(f'''{key}={val}''' ) # python executable (not always needed if the script is executable) A_ : Optional[Any] = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(UpperCamelCase__ ) # now the normal args cmd += list(map(shlex.quote ,sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A_ : Optional[int] = [] A_ : List[str] = '' while len(UpperCamelCase__ ) > 0: current_line += f'''{cmd.pop(0 )} ''' if len(UpperCamelCase__ ) == 0 or len(UpperCamelCase__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(UpperCamelCase__ ) A_ : Optional[Any] = '' return "\\\n".join(UpperCamelCase__ ) def UpperCamelCase ( __lowercase : str ,__lowercase : Any ): '''simple docstring''' A_ : Any = re.sub(r'[\\\n]+' ,' ' ,args.base_cmd ) # remove --output_dir if any and set our own A_ : Union[str, Any] = re.sub('--output_dir\s+[^\s]+' ,'' ,args.base_cmd ) args.base_cmd += f''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir A_ : Dict = re.sub('--overwrite_output_dir\s+' ,'' ,args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def UpperCamelCase ( __lowercase : Any ,__lowercase : List[Any] ,__lowercase : Optional[int] ,__lowercase : Optional[int] ,__lowercase : Optional[int] ,__lowercase : List[str] ,__lowercase : Tuple ): '''simple docstring''' if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 ,1_00 ) for k in metric_keys} ,**{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} ,) A_ : str = subprocess.run(UpperCamelCase__ ,capture_output=UpperCamelCase__ ,text=UpperCamelCase__ ) if verbose: print('STDOUT' ,result.stdout ) print('STDERR' ,result.stderr ) # save the streams A_ : Any = variation.replace(' ' ,'-' ) with open(Path(UpperCamelCase__ ) / f'''log.{prefix}.stdout.txt''' ,'w' ) as f: f.write(result.stdout ) with open(Path(UpperCamelCase__ ) / f'''log.{prefix}.stderr.txt''' ,'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(f'''{output_dir}/all_results.json''' ,'r' ,encoding='utf-8' ) as f: A_ : Union[str, Any] = json.load(UpperCamelCase__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def UpperCamelCase ( __lowercase : str ,__lowercase : int ,__lowercase : Any ,__lowercase : Union[str, Any] ,__lowercase : 
Optional[Any] ,__lowercase : Optional[int] ,__lowercase : Optional[int] ,__lowercase : Tuple ,__lowercase : List[str] ,__lowercase : str ,): '''simple docstring''' A_ : List[Any] = [] A_ : List[str] = [] A_ : Union[str, Any] = f'''{id}: {variation:<{longest_variation_len}}''' A_ : Tuple = f'''{preamble}: ''' A_ : Any = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(UpperCamelCase__ ) ,desc=UpperCamelCase__ ,leave=UpperCamelCase__ ): A_ : Optional[Any] = process_run_single( UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ) A_ : str = single_run_metrics[target_metric_key] if not math.isnan(UpperCamelCase__ ): metrics.append(UpperCamelCase__ ) results.append(UpperCamelCase__ ) outcome += "✓" else: outcome += "✘" A_ : List[str] = f'''\33[2K\r{outcome}''' if len(UpperCamelCase__ ) > 0: A_ : Tuple = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A_ : Optional[Any] = round(mean_metrics[target_metric_key] ,2 ) A_ : Dict = f'''{outcome} {mean_target}''' if len(UpperCamelCase__ ) > 1: results_str += f''' {tuple(round(UpperCamelCase__ ,2 ) for x in results )}''' print(UpperCamelCase__ ) A_ : Tuple = variation return mean_metrics else: print(UpperCamelCase__ ) return {variation_key: variation, target_metric_key: nan} def UpperCamelCase ( ): '''simple docstring''' A_ : List[str] = torch.cuda.get_device_properties(torch.device('cuda' ) ) return f'''\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n''' def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Any ,__lowercase : str ,__lowercase : int ,__lowercase : int ): '''simple docstring''' A_ : Optional[int] = pd.DataFrame(UpperCamelCase__ ) A_ : Tuple = 'variation' A_ : Any = 'diff_%' A_ : Union[str, Any] = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A_ : Dict = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(UpperCamelCase__ ): # as a fallback, use the minimal value as the sentinel A_ : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(UpperCamelCase__ ): A_ : int = df.apply( lambda __lowercase : round(1_00 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 ,axis='columns' ,) # re-order columns A_ : Optional[Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys] A_ : Dict = df.reindex(UpperCamelCase__ ,axis='columns' ) # reorder cols # capitalize A_ : str = df.rename(str.capitalize ,axis='columns' ) # make the cols as narrow as possible A_ : List[Any] = df.rename(lambda __lowercase : c.replace('_' ,'<br>' ) ,axis='columns' ) A_ : Any = df.rename(lambda __lowercase : c.replace('_' ,'\n' ) ,axis='columns' ) A_ : Any = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=UpperCamelCase__ ,floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += 
["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=UpperCamelCase__ ,floatfmt='.2f' )] print('\n\n'.join(UpperCamelCase__ ) ) def UpperCamelCase ( ): '''simple docstring''' A_ : int = argparse.ArgumentParser() parser.add_argument( '--base-cmd' ,default=UpperCamelCase__ ,type=UpperCamelCase__ ,required=UpperCamelCase__ ,help='Base cmd' ,) parser.add_argument( '--variations' ,default=UpperCamelCase__ ,type=UpperCamelCase__ ,nargs='+' ,required=UpperCamelCase__ ,help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' ,) parser.add_argument( '--base-variation' ,default=UpperCamelCase__ ,type=UpperCamelCase__ ,help='Baseline variation to compare to. if None the minimal target value will be used to compare against' ,) parser.add_argument( '--target-metric-key' ,default=UpperCamelCase__ ,type=UpperCamelCase__ ,required=UpperCamelCase__ ,help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' ,) parser.add_argument( '--report-metric-keys' ,default='' ,type=UpperCamelCase__ ,help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples' ,) parser.add_argument( '--repeat-times' ,default=1 ,type=UpperCamelCase__ ,help='How many times to re-run each variation - an average will be reported' ,) parser.add_argument( '--output_dir' ,default='output_benchmark' ,type=UpperCamelCase__ ,help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' ,) parser.add_argument( '--verbose' ,default=UpperCamelCase__ ,action='store_true' ,help='Whether to show the outputs of each run or just the benchmark progress' ,) A_ : Dict = parser.parse_args() A_ : List[str] = args.output_dir Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) A_ : str = get_base_command(UpperCamelCase__ ,UpperCamelCase__ ) # split each dimension into its --foo variations A_ : List[str] = [list(map(str.strip ,re.split(r'\|' ,UpperCamelCase__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A_ : List[Any] = list(map(str.strip ,map(' '.join ,itertools.product(*UpperCamelCase__ ) ) ) ) A_ : List[Any] = max(len(UpperCamelCase__ ) for x in variations ) # split wanted keys A_ : int = args.report_metric_keys.split() # capture prints into a log file for convenience A_ : Optional[int] = f'''benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt''' print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(f'''and this script\'s output is also piped into {report_fn}''' ) A_ : List[Any] = Tee(UpperCamelCase__ ) print(f'''\n*** Running {len(UpperCamelCase__ )} benchmarks:''' ) print(f'''Base command: {' '.join(UpperCamelCase__ )}''' ) A_ : Optional[Any] = 'variation' A_ : Optional[Any] = [] for id, variation in enumerate(tqdm(UpperCamelCase__ ,desc='Total completion: ' ,leave=UpperCamelCase__ ) ): A_ : str = base_cmd + variation.split() results.append( process_run( id + 1 ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,args.target_metric_key ,UpperCamelCase__ ,args.repeat_times ,UpperCamelCase__ ,args.verbose ,) ) process_results(UpperCamelCase__ ,args.target_metric_key ,UpperCamelCase__ ,args.base_variation 
,UpperCamelCase__ ) if __name__ == "__main__": main()
code_codestyle: 702
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : Any = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Tuple = 
AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[str] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) 
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
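# Illustrative sketch (not part of the original record): the cross-framework
# loading exercised by the tests above reduces to the from_pt / from_tf flags,
# assuming the checkpoint publishes weights for both frameworks.
from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PyTorch weights -> TF model
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)  # TF weights -> PyTorch model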
70
0
from copy import deepcopy


class UpperCAmelCase:
    '''Fenwick tree (binary indexed tree): point updates and prefix-sum queries.'''

    def __init__(self, arr=None, size=None):
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError('Either arr or size must be specified')

    def init(self, arr):
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self):
        '''Recover the underlying array from the tree representation.'''
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index):
        return index + (index & (-index))

    @staticmethod
    def prev(index):
        return index - (index & (-index))

    def add(self, index, value):
        '''Add `value` to the element at `index`.'''
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index, value):
        '''Set the element at `index` to `value`.'''
        self.add(index, value - self.get(index))

    def prefix(self, right):
        '''Sum of the elements in [0, right).'''
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left, right):
        '''Sum of the elements in [left, right).'''
        return self.prefix(right) - self.prefix(left)

    def get(self, index):
        return self.query(index, index + 1)

    def rank_query(self, value):
        '''Largest index whose prefix sum does not exceed `value` (-1 if none); assumes non-negative elements.'''
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
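# Illustrative usage (not part of the original record); the method names follow
# the reconstruction above, and the expected values were computed by hand.
f = UpperCAmelCase([1, 2, 3, 4, 5])
assert f.prefix(3) == 6  # 1 + 2 + 3
assert f.query(1, 4) == 9  # 2 + 3 + 4
f.add(1, 10)  # element 1 becomes 12
assert f.query(1, 4) == 19  # 12 + 3 + 4
assert f.get_array() == [1, 12, 3, 4, 5]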
703
def UpperCamelCase(arr: list) -> int:
    '''Minimum difference between the sums of the two halves of a partition of `arr`.'''
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # a sum of 0 is always reachable (empty subset)
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # sum j is reachable with the first i items if it was reachable
            # without item i, or if j - arr[i - 1] was reachable without it
            dp[i][j] = dp[i - 1][j]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
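# Quick sanity checks (illustrative, not part of the original record):
# [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the best difference is 1.
assert UpperCamelCase([1, 6, 11, 5]) == 1  # |12 - 11|
assert UpperCamelCase([3]) == 3  # a single element cannot be split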
70
0
import os


def solution():
    '''Sum of the alphabetical-value scores of the names in p022_names.txt,
    each weighted by its 1-based position in the sorted list (Project Euler 22).'''
    with open(os.path.dirname(__file__) + '/p022_names.txt') as file:
        names = str(file.readlines()[0])
        names = names.replace('"', '').split(',')
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score


if __name__ == "__main__":
    print(solution())
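# Sanity check of the scoring rule using the example from the problem statement
# (illustrative, not part of the original record): COLIN is worth
# 3 + 15 + 12 + 9 + 14 = 53, and at position 938 contributes 938 * 53 = 49714.
value = sum(ord(c) - 64 for c in "COLIN")
assert value == 53
assert 938 * value == 49714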
704
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class UpperCAmelCase(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        geluaa = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_aa = geluaa(x)
        clipped_mask = torch.where(y_gelu_aa < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_aa).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_aa * clipped_mask))

    def test_get_activation(self):
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        acta = get_activation('gelu')
        acta.a = 1
        actb = get_activation('gelu')
        self.assertEqual(acta.a, 1)
        with self.assertRaises(AttributeError):
            _ = actb.a
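# Standalone illustration of the clipping behaviour tested above (assumes a
# transformers version that registers the "gelu_10" activation, as the test implies).
import torch
from transformers.activations import get_activation

x = torch.tensor([0.5, 5.0, 50.0])
print(get_activation("gelu")(x))  # unbounded: ~[0.345, 5.0, 50.0]
print(get_activation("gelu_10")(x))  # same values, capped at 10.0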
70
0
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss _UpperCAmelCase = pytest.mark.integration @require_faiss class UpperCAmelCase ( __lowercase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowercase ) for x in np.arange(3_0 ).tolist()]} ) return dset def lowerCAmelCase_ ( self ): """simple docstring""" import faiss A_ : Dataset = self._create_dummy_dataset() A_ : int = dset.map( lambda lowercase , lowercase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowercase , keep_in_memory=lowercase ) A_ : str = dset.add_faiss_index('vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT ) A_ : List[Any] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def lowerCAmelCase_ ( self ): """simple docstring""" import faiss A_ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_0_0 , metric_type=faiss.METRIC_INNER_PRODUCT , ) A_ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def lowerCAmelCase_ ( self ): """simple docstring""" import faiss A_ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) A_ : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((3_0, 5) ) * np.arange(3_0 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(lowercase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def lowerCAmelCase_ ( self ): """simple docstring""" from elasticsearch import Elasticsearch A_ : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: A_ : List[Any] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 3_0 ) A_ : Dict = {"hits": {"hits": [{"_score": 1, "_id": 2_9}]}} A_ : List[Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=lowercase ) A_ : Dict = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class UpperCAmelCase ( __lowercase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" import faiss A_ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 1_0 ) # single query A_ : Union[str, Any] = np.zeros(5 , dtype=np.floataa ) A_ : Union[str, Any] = 1 A_ : List[str] = index.search(lowercase ) self.assertRaises(lowercase , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries A_ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1] A_ : Optional[int] = index.search_batch(lowercase ) self.assertRaises(lowercase , index.search_batch , queries[0] ) A_ : int = [scores[0] for scores in total_scores] A_ : str = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" import faiss A_ : Optional[Any] = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) A_ : str = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(lowercase ): A_ : Union[str, Any] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def lowerCAmelCase_ ( self ): """simple docstring""" import faiss A_ : Optional[int] = faiss.IndexFlat(5 ) A_ : List[Any] = FaissIndex(custom_index=lowercase ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def lowerCAmelCase_ ( self ): """simple docstring""" 
import faiss A_ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=lowercase ) as tmp_file: index.save(tmp_file.name ) A_ : int = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) A_ : Any = np.zeros(5 , dtype=np.floataa ) A_ : List[str] = 1 A_ : Optional[Any] = index.search(lowercase ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def UpperCamelCase ( __lowercase : Dict ): '''simple docstring''' import faiss A_ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 ,dtype=np.floataa ) ) A_ : str = "index.faiss" A_ : Dict = f'''mock://{index_name}''' index.save(_lowerCamelCase ,storage_options=mockfs.storage_options ) A_ : int = FaissIndex.load(_lowerCamelCase ,storage_options=mockfs.storage_options ) A_ : List[Any] = np.zeros(5 ,dtype=np.floataa ) A_ : Optional[int] = 1 A_ : List[str] = index.search(_lowerCamelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class UpperCAmelCase ( __lowercase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: A_ : Tuple = Elasticsearch() A_ : Any = {"acknowledged": True} A_ : str = ElasticSearchIndex(es_client=lowercase ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query A_ : List[Any] = "foo" A_ : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} A_ : Union[str, Any] = index.search(lowercase ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout A_ : Any = "foo" A_ : Union[str, Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} A_ : int = index.search(lowercase , request_timeout=3_0 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries A_ : List[Any] = ["foo", "bar", "foobar"] A_ : Tuple = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} A_ : int = index.search_batch(lowercase ) A_ : Dict = [scores[0] for scores in total_scores] A_ : Any = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([1, 1, 1] , lowercase ) # batched queries with timeout A_ : Optional[int] = ["foo", "bar", "foobar"] A_ : Dict = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} A_ : List[Any] = index.search_batch(lowercase , request_timeout=3_0 ) A_ : List[str] = [scores[0] for scores in total_scores] A_ : List[str] = [indices[0] for indices in total_indices] self.assertGreater(np.min(lowercase ) , 0 ) self.assertListEqual([1, 1, 1] , lowercase )
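# Outside the test harness, the FaissIndex round-trip above reduces to a few
# calls (the vectors and the temp path are illustrative, not from the record).
import faiss
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))
scores, indices = index.search(np.ones(5, dtype=np.float32))  # nearest examples first
index.save("/tmp/demo.faiss")
restored = FaissIndex.load("/tmp/demo.faiss")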
705
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCAmelCase = logging.get_logger(__name__) # General docstring _UpperCAmelCase = """RegNetConfig""" # Base docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = [1, 1088, 7, 7] # Image classification docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = """tabby, tabby cat""" _UpperCAmelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , ) A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[str] = self.convolution(self.padding(lowercase ) ) A_ : List[str] = self.normalization(lowercase ) A_ : List[Any] = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[int] = config.num_channels A_ : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = shape_list(lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) ) A_ : Optional[int] = self.embedder(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' ) A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" return self.normalization(self.convolution(lowercase ) , training=lowercase ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) A_ : Optional[Any] = [ tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = self.pooler(lowercase ) for layer_module in self.attention: A_ : Optional[Any] = layer_module(lowercase ) A_ : Optional[int] = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : Optional[int] = max(1 , out_channels // config.groups_width ) A_ : List[Any] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
A_ : Optional[int] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ), ] A_ : List[str] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = hidden_state for layer_module in self.layers: A_ : int = layer_module(lowercase ) A_ : Union[str, Any] = self.shortcut(lowercase ) hidden_state += residual A_ : Dict = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : int = max(1 , out_channels // config.groups_width ) A_ : Optional[int] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) A_ : List[str] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ), ] A_ : Union[str, Any] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = hidden_state for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) A_ : int = self.shortcut(lowercase ) hidden_state += residual A_ : str = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer A_ : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ), *[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : List[str] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) ) def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ): """simple docstring""" A_ : Tuple = () if 
output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A_ : Dict = hidden_states + (hidden_state,) A_ : List[Any] = stage_module(lowercase ) if output_hidden_states: A_ : Union[str, Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' lowerCamelCase_ = RegNetConfig def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[Any] = config A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' ) A_ : str = TFRegNetEncoder(lowercase , name='encoder' ) A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" A_ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase ) A_ : Optional[int] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Dict = encoder_outputs[0] A_ : List[Any] = self.pooler(lowercase ) # Change to NCHW output format have uniformity in the modules A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = RegNetConfig lowerCamelCase_ = '''regnet''' lowerCamelCase_ = '''pixel_values''' @property def lowerCAmelCase_ ( self ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _UpperCAmelCase = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : int = TFRegNetMainLayer(lowercase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : Tuple = self.regnet( pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , __A , ) class UpperCAmelCase ( __A , __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : List[Any] = config.num_labels A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' ) # classification head A_ : Union[str, Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : List[Any] = self.regnet( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] A_ : List[Any] = self.classifier[0](lowercase ) A_ : Union[str, Any] = self.classifier[1](lowercase ) A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase ) if not return_dict: A_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
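# The layers above mirror transformers' TF RegNet port. Under the public class
# names, a forward pass against the checkpoint named in the docstrings looks
# like this sketch (the expected shape comes from _EXPECTED_OUTPUT_SHAPE above).
import tensorflow as tf
from transformers import TFRegNetModel

model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW; transposed internally
outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # (1, 1088, 7, 7)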
70
0
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    '''Save the best model by validation metric.'''
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)

        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
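# Wired together, the helpers above attach to a Lightning trainer roughly like
# this (output directory, metric and patience are illustrative).
import pytorch_lightning as pl

trainer = pl.Trainer(
    callbacks=[
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(output_dir="outputs", metric="rouge2"),
        get_early_stopping_callback(metric="rouge2", patience=3),
    ],
    max_epochs=3,
)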
706
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
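# The point of this boilerplate: importing the package stays cheap, and the
# torch-backed classes are only materialized on first attribute access (sketch).
import transformers.models.biogpt as biogpt  # fast: no torch import yet

model_cls = biogpt.BioGptForCausalLM  # first access triggers the lazy import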
70
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCAmelCase ( A_ , A_ , A_ , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline lowerCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} lowerCamelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCamelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase_ ( self ): """simple docstring""" torch.manual_seed(0 ) A_ : Tuple = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , ) A_ : int = PNDMScheduler(skip_prk_steps=lowercase ) torch.manual_seed(0 ) A_ : List[Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) A_ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) A_ : str = CLIPTextModel(lowercase ) A_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A_ : Any = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def lowerCAmelCase_ ( self , lowercase , lowercase=0 ): """simple docstring""" A_ : Optional[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(lowercase ) ).to(lowercase ) A_ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Tuple = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ) if str(lowercase ).startswith('mps' ): A_ : List[str] = torch.manual_seed(lowercase ) else: A_ : Optional[Any] = torch.Generator(device=lowercase ).manual_seed(lowercase ) A_ : str = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : List[Any] = self.get_dummy_components() A_ : int = StableDiffusionInstructPixaPixPipeline(**lowercase ) A_ : List[Any] = sd_pipe.to(lowercase ) 
sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : List[Any] = self.get_dummy_inputs(lowercase ) A_ : Any = sd_pipe(**lowercase ).images A_ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) A_ : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : Tuple = self.get_dummy_components() A_ : Tuple = StableDiffusionInstructPixaPixPipeline(**lowercase ) A_ : Dict = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : Any = self.get_dummy_inputs(lowercase ) A_ : List[Any] = """french fries""" A_ : Optional[int] = sd_pipe(**lowercase , negative_prompt=lowercase ) A_ : List[Any] = output.images A_ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) A_ : int = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : Union[str, Any] = self.get_dummy_components() A_ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**lowercase ) A_ : List[Any] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : Optional[Any] = self.get_dummy_inputs(lowercase ) A_ : Tuple = [inputs["""prompt"""]] * 2 A_ : List[Any] = np.array(inputs['image'] ).astype(np.floataa ) / 2_5_5.0 A_ : List[str] = torch.from_numpy(lowercase ).unsqueeze(0 ).to(lowercase ) A_ : int = image / 2 + 0.5 A_ : str = image.permute(0 , 3 , 1 , 2 ) A_ : List[str] = image.repeat(2 , 1 , 1 , 1 ) A_ : Optional[Any] = sd_pipe(**lowercase ).images A_ : Union[str, Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 3_2, 3_2, 3) A_ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator A_ : int = self.get_dummy_components() A_ : Optional[int] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' ) A_ : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowercase ) A_ : List[Any] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : List[str] = self.get_dummy_inputs(lowercase ) A_ : List[Any] = sd_pipe(**lowercase ).images A_ : Tuple = image[0, -3:, -3:, -1] A_ : Union[str, Any] = [round(lowercase , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(lowercase ) for x in slice] ) ) assert image.shape == (1, 3_2, 3_2, 3) A_ : Optional[int] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.get_dummy_components() A_ : Any = StableDiffusionInstructPixaPixPipeline(**lowercase ) A_ : Optional[Any] = VaeImageProcessor(do_resize=lowercase , do_normalize=lowercase ) A_ : int = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) A_ : 
Optional[Any] = pipe(**self.get_dummy_inputs_by_type(lowercase , input_image_type='pt' ) )[0] A_ : Union[str, Any] = components["""vae"""] A_ : int = self.get_dummy_inputs_by_type(lowercase , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): A_ : Any = vae.encode(inputs[image_param] ).latent_dist.mode() A_ : Optional[Any] = pipe(**lowercase )[0] A_ : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(lowercase , 1E-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self , lowercase=0 ): """simple docstring""" A_ : Optional[Any] = torch.manual_seed(lowercase ) A_ : Tuple = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) A_ : List[str] = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : int = self.get_inputs() A_ : Optional[int] = pipe(**lowercase ).images A_ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) A_ : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowercase ) A_ : Optional[int] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : str = self.get_inputs() A_ : Optional[Any] = pipe(**lowercase ).images A_ : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) A_ : Union[str, Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowercase ) A_ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : Optional[int] = self.get_inputs() A_ : Dict = pipe(**lowercase ).images A_ : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) A_ : str = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = 0 def callback_fn(lowercase , lowercase , lowercase ) -> None: A_ : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: A_ : List[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) A_ : int = 
latents[0, -3:, -3:, -1] A_ : int = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: A_ : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) A_ : List[Any] = latents[0, -3:, -3:, -1] A_ : Tuple = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 A_ : Optional[int] = False A_ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowercase , torch_dtype=torch.floataa ) A_ : int = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : Tuple = self.get_inputs() pipe(**lowercase , callback=lowercase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def lowerCAmelCase_ ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A_ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=lowercase , torch_dtype=torch.floataa ) A_ : List[str] = pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A_ : int = self.get_inputs() A_ : Dict = pipe(**lowercase ) A_ : List[str] = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 1_0**9 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 A_ : List[str] = inputs["""image"""].resize((5_0_4, 5_0_4) ) A_ : List[str] = """timbrooks/instruct-pix2pix""" A_ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowercase , safety_checker=lowercase , ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : List[str] = pipe(**lowercase ) A_ : List[Any] = output.images[0] A_ : Optional[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 5_0_4, 3) A_ : List[str] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
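# Stripped of assertions, the slow tests above drive the pipeline like this
# (prompt and step count are illustrative; the URL is the one the tests download).
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
result = pipe("turn him into a cyborg", image=image, num_inference_steps=10, image_guidance_scale=1.0)
result.images[0].save("cyborg.png")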
707
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(F"""Original: {arr}. Sorted: {odd_even_transposition(arr)}""")
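# Each of the n passes alternates between even- and odd-indexed neighbour pairs,
# so all comparisons within a pass are independent: the parallel form of bubble
# sort. A hand trace (illustrative): [3, 1, 2] -> even pass swaps (0, 1) ->
# [1, 3, 2] -> odd pass swaps (1, 2) -> [1, 2, 3].
print(odd_even_transposition([3, 1, 2]))  # [1, 2, 3]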
70
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''wavlm''' def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): """simple docstring""" super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) A_ : List[Any] = hidden_size A_ : Tuple = feat_extract_norm A_ : Dict = feat_extract_activation A_ : Optional[Any] = list(lowercase ) A_ : Union[str, Any] = list(lowercase ) A_ : List[str] = list(lowercase ) A_ : str = conv_bias A_ : Tuple = num_buckets A_ : Union[str, Any] = max_bucket_distance A_ : int = num_conv_pos_embeddings A_ : str = num_conv_pos_embedding_groups A_ : str = len(self.conv_dim ) A_ : Tuple = num_hidden_layers A_ : Tuple = intermediate_size A_ : Optional[Any] = hidden_act A_ : Optional[Any] = num_attention_heads A_ : str = hidden_dropout A_ : Optional[int] = attention_dropout A_ : Optional[Any] = activation_dropout A_ : Optional[int] = feat_proj_dropout A_ : List[Any] = final_dropout A_ : Union[str, Any] = layerdrop A_ : Dict = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : str = num_ctc_classes A_ : Any = vocab_size A_ : str = do_stable_layer_norm A_ : int = use_weighted_layer_sum A_ : int = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : List[str] = apply_spec_augment A_ : Optional[Any] = mask_time_prob A_ : int = mask_time_length A_ : Any = mask_time_min_masks A_ : Optional[int] = mask_feature_prob A_ : Tuple = mask_feature_length # parameters for pretraining with codevector quantized representations A_ : int = num_codevectors_per_group A_ : Any = num_codevector_groups A_ : List[Any] = contrastive_logits_temperature A_ : Optional[Any] = num_negatives A_ : Optional[Any] = codevector_dim A_ : int = proj_codevector_dim A_ : int = diversity_loss_weight # ctc loss A_ : Union[str, Any] = ctc_loss_reduction A_ : Any = ctc_zero_infinity # adapter A_ : int = add_adapter A_ : Optional[Any] = adapter_kernel_size A_ : Optional[int] = adapter_stride A_ : Dict = num_adapter_layers A_ : str = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A_ : int = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A_ : Tuple = list(lowercase ) A_ : Optional[Any] = list(lowercase ) A_ : Dict = list(lowercase ) A_ : Dict = xvector_output_dim @property def lowerCAmelCase_ ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
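# For reference: the closing property above is the config's input-to-output
# stride ratio; with the default conv_stride (5, 2, 2, 2, 2, 2, 2) it multiplies
# out to 320 samples per frame. Sketch under the released class name (the dump
# renames the class, and the property name is assumed from the public API).
from transformers import WavLMConfig

config = WavLMConfig()
assert config.inputs_to_logits_ratio == 320  # 5 * 2**6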
70
0
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
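A follow-up sketch (assumes the script above has already been run, so a local `tiny-wmt19-en-de` directory exists): the tiny checkpoint reloads like any pretrained model.

from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tiny = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
tiny = tiny.float()  # fp16 ops are not always supported on CPU
tok = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")
batch = tok(["Reloading the tiny model"], return_tensors="pt")
print(tiny(**batch).logits.shape)  # (batch, seq_len, vocab_size)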
709
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger() def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ): '''simple docstring''' print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase ) else: A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase ) if hidden_sizes == 1_92: A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase ) if hidden_sizes == 2_56: A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase ) if hidden_sizes == 3_84: A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase ) from_model.eval() A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval() A_ : Union[str, Any] = OrderedDict() A_ : Dict = from_model.state_dict() A_ : Tuple = list(from_model.state_dict().keys() ) A_ : str = list(our_model.state_dict().keys() ) print(len(__lowercase ) ,len(__lowercase ) ) for i in range(len(__lowercase ) ): A_ : str = weights[og_keys[i]] our_model.load_state_dict(__lowercase ) A_ : str = torch.randn((2, 3, 2_24, 2_24) ) A_ : str = from_model(__lowercase ) A_ : Optional[Any] = our_model(__lowercase ).logits assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one." A_ : List[str] = name print(__lowercase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) A_ : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ): '''simple docstring''' A_ : Dict = 'imagenet-1k-id2label.json' A_ : Optional[int] = 10_00 A_ : Optional[int] = (1, num_labels) A_ : int = 'huggingface/label-files' A_ : int = num_labels A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) ) A_ : int = {int(__lowercase ): v for k, v in idalabel.items()} A_ : List[str] = idalabel A_ : str = {v: k for k, v in idalabel.items()} A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase ) A_ : Any = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } A_ : Tuple = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 
32, 32] ,drop_path_rate=0.1 ,), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
70
0
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _UpperCAmelCase = get_tests_dir("""fixtures""") _UpperCAmelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _UpperCAmelCase = get_tests_dir("""fixtures/dummy-config.json""") class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = 0 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = AutoFeatureExtractor.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: A_ : str = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally A_ : Dict = AutoFeatureExtractor.from_pretrained(UpperCamelCase__ ).to_dict() config_dict.pop('feature_extractor_type' ) A_ : Dict = WavaVecaFeatureExtractor(**UpperCamelCase__ ) # save in new folder model_config.save_pretrained(UpperCamelCase__ ) config.save_pretrained(UpperCamelCase__ ) A_ : Any = AutoFeatureExtractor.from_pretrained(UpperCamelCase__ ) # make sure private variable is not incorrectly saved A_ : int = json.loads(config.to_json_string() ) self.assertTrue('_processor_class' not in dict_as_saved ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = AutoFeatureExtractor.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaisesRegex( UpperCamelCase__ , 'bert-base is not a local folder and is not a valid model identifier' ): A_ : Any = AutoFeatureExtractor.from_pretrained('bert-base' ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaisesRegex( UpperCamelCase__ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ): A_ : Optional[Any] = AutoFeatureExtractor.from_pretrained(UpperCamelCase__ , revision='aaaaaa' ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaisesRegex( UpperCamelCase__ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ): A_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaises(UpperCamelCase__ ): A_ : Dict = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(UpperCamelCase__ ): A_ : Optional[Any] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCamelCase__ ) A_ : List[str] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCamelCase__ ) A_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(UpperCamelCase__ , trust_remote_code=UpperCamelCase__ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) def lowerCAmelCase_ ( self ): """simple docstring""" try: AutoConfig.register('custom' , UpperCamelCase__ ) AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCamelCase__ ): AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API A_ : Any = CustomFeatureExtractor.from_pretrained(UpperCamelCase__ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(UpperCamelCase__ ) A_ : List[Any] = AutoFeatureExtractor.from_pretrained(UpperCamelCase__ ) self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCAmelCase_ ( self ): """simple docstring""" class UpperCAmelCase ( lowercase_ ): '''simple docstring''' lowerCamelCase_ = True try: AutoConfig.register('custom' , UpperCamelCase__ ) AutoFeatureExtractor.register(UpperCamelCase__ , UpperCamelCase__ ) # If remote code is not set, the default is to use local A_ : int = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. A_ : List[Any] = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub A_ : Dict = AutoFeatureExtractor.from_pretrained( 'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCamelCase__ ) self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' ) self.assertTrue(not hasattr(UpperCamelCase__ , 'is_local' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
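A stripped-down sketch of the register/unregister pattern those tests exercise (the `My*` names here are placeholders, not part of the fixtures):

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyFeatureExtractor(FeatureExtractionMixin):
    pass


AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
# AutoFeatureExtractor can now resolve "my-model" configs. Remove the entries
# from CONFIG_MAPPING / FEATURE_EXTRACTOR_MAPPING afterwards, as the tests'
# finally blocks do.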
710
def text_justification(word: str, max_width: int) -> list[str]:
    """Fully justify the words of `word` into lines of exactly `max_width` characters."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in the line,
            # just pad the remainder of the line with spaces
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you how many spaces to
            # insert after the word at line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute the leftover spaces round-robin to the leftmost gaps
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word, then the spaces that follow it
                aligned_words_list.append(line[i])
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # the last word carries no trailing spaces
            aligned_words_list.append(line[-1])
            # join the aligned words to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words while they fit:
            # width     = total length of the words already on the line
            # len(line) = minimum number of separating spaces needed
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the current line and add it to the result
            answer.append(justify(line, width, max_width))
            # start a new line with the current word
            line, width = [inner_word], len(inner_word)
    # the last line is left-justified and padded out to max_width
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
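A worked example for the justifier above (output verified by hand against the round-robin space distribution):

result = text_justification("This is an example of text justification.", 16)
assert result == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]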
70
0
def optimal_merge_pattern(files: list) -> int:
    """Return the minimum total cost of merging a list of file sizes (optimal merge pattern)."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # merge the two smallest files
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
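A hand-computed check: for [2, 3, 4] the greedy merge combines 2+3 at cost 5, then 5+4 at cost 9, totalling 14 — the same pairing a Huffman tree would choose. Note the function consumes its input list in place.

assert optimal_merge_pattern([2, 3, 4]) == 14
assert optimal_merge_pattern([10, 20, 30]) == 90  # (10+20) -> 30, then (30+30) -> 60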
711
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa _UpperCAmelCase = logging.getLogger(__name__) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''summarization''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ROUGE_KEYS lowerCamelCase_ = '''rouge2''' def __init__( self , lowercase , **lowercase ): """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: A_ : str = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) A_ : List[str] = Path(self.output_dir ) / 'metrics.json' A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) A_ : str = 0 A_ : Any = defaultdict(lowercase ) A_ : Union[str, Any] = self.config.model_type A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size A_ : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } A_ : Optional[Any] = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} A_ : Tuple = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) A_ : int = get_git_info()['repo_sha'] A_ : int = hparams.num_workers A_ : Union[str, Any] = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ): A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang] A_ : Any = self.decoder_start_token_id A_ : str = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset ) A_ : Union[str, Any] = False A_ : Tuple = self.model.config.num_beams 
if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: A_ : int = self.hparams.eval_max_gen_length else: A_ : List[Any] = self.model.config.max_length A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) A_ : int = True return readable_batch def lowerCAmelCase_ ( self , lowercase , **lowercase ): """simple docstring""" return self.model(lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[Any] = self.tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) return lmap(str.strip , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = self.tokenizer.pad_token_id A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask'] A_ : str = batch['labels'] if isinstance(self.model , lowercase ): A_ : Optional[int] = self.model._shift_right(lowercase ) else: A_ : Any = shift_tokens_right(lowercase , lowercase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero A_ : Optional[Any] = decoder_input_ids self.save_readable_batch(lowercase ) A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase ) A_ : Dict = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase ) assert lm_logits.shape[-1] == self.vocab_size A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 ) A_ , A_ : Any = label_smoothed_nll_loss( lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase ) return (loss,) @property def lowerCAmelCase_ ( self ): """simple docstring""" return self.tokenizer.pad_token_id def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : str = self._step(lowercase ) A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) ) # tokens per batch A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() A_ : str = batch['input_ids'].shape[0] A_ : Any = batch['input_ids'].eq(self.pad ).sum() A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase="val" ): """simple docstring""" self.step_count += 1 A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} A_ : Dict = losses['loss'] A_ : int = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len'] } A_ : Any = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase ) 
generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase ) A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} A_ : Tuple = self.step_count self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path A_ : Dict = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_rouge(lowercase , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') A_ : Optional[int] = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) A_ : int = (time.time() - ta) / batch['input_ids'].shape[0] A_ : List[str] = self.ids_to_clean_text(lowercase ) A_ : List[str] = self.ids_to_clean_text(batch['labels'] ) A_ : List[Any] = self._step(lowercase ) A_ : int = dict(zip(self.loss_names , lowercase ) ) A_ : Dict = self.calc_generative_metrics(lowercase , lowercase ) A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) ) base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase ) return base_metrics def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return self.validation_epoch_end(lowercase , prefix='test' ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = self.n_obs[type_path] A_ : List[Any] = self.target_lens[type_path] A_ : str = self.dataset_class( self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , ) return dataset def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ): """simple docstring""" A_ : Optional[int] = self.get_dataset(lowercase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": A_ : str = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase ) return dataloader def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def lowerCAmelCase_ ( lowercase , 
lowercase ): """simple docstring""" BaseTransformer.add_model_specific_args(lowercase , lowercase ) add_generic_args(lowercase , lowercase ) parser.add_argument( '--max_source_length' , default=1_0_2_4 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=5_6 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase ) parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase ) parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=lowercase , default='summarization' , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase ) parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase ) parser.add_argument( '--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=( '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' 
) , ) return parser class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''translation''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ['''bleu'''] lowerCamelCase_ = '''bleu''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , **lowercase ) A_ : List[Any] = hparams.src_lang A_ : str = hparams.tgt_lang def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_bleu(lowercase , lowercase ) def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=__lowercase ) check_output_dir(__lowercase ,expected_items=3 ) if model is None: if "summarization" in args.task: A_ : SummarizationModule = SummarizationModule(__lowercase ) else: A_ : SummarizationModule = TranslationModule(__lowercase ) A_ : Optional[int] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('/tmp' ) or str(args.output_dir ).startswith('/var' ) ): A_ : List[str] = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase ) A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience ) else: A_ : str = False A_ : Dict = args.val_metric == 'loss' A_ : pl.Trainer = generic_train( __lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback( args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,) pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' ) if not args.do_predict: return model A_ : Optional[Any] = '' A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) ) if checkpoints: A_ : List[Any] = checkpoints[-1] A_ : Any = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() _UpperCAmelCase = pl.Trainer.add_argparse_args(parser) _UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd()) _UpperCAmelCase = parser.parse_args() main(args)
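The training step above leans on `label_smoothed_nll_loss` from the accompanying `utils.py`. For readers without that file, here is a sketch of the standard fairseq-style implementation (signature assumed from the call site, which passes log-probs, gold ids, the smoothing epsilon, and the pad id as `ignore_index`, and unpacks a `(loss, nll_loss)` pair):

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss = nll_loss.sum()
    smooth_loss = smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss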
70
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
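A brief note on the `_LazyModule` pattern above: nothing from the submodules is imported at package-import time; the real import happens on first attribute access, which keeps `import transformers` cheap. From the caller's side the module behaves eagerly:

import transformers

# The processing_layoutxlm submodule is only imported when the attribute is
# first resolved through the lazy module:
processor_cls = transformers.models.layoutxlm.LayoutXLMProcessor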
712
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ): """simple docstring""" A_ : List[Any] = parent A_ : Optional[Any] = batch_size A_ : Dict = image_size A_ : str = num_channels A_ : Union[str, Any] = embeddings_size A_ : Optional[Any] = hidden_sizes A_ : Any = depths A_ : List[str] = is_training A_ : int = use_labels A_ : Optional[Any] = hidden_act A_ : List[Any] = num_labels A_ : Optional[int] = scope A_ : int = len(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) A_ : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Any = TFRegNetModel(config=lowercase ) A_ : Optional[Any] = model(lowercase , training=lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : int = self.num_labels A_ : Tuple = TFRegNetForImageClassification(lowercase ) A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : List[Any] = config_and_inputs A_ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase_ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = TFRegNetModelTester(self ) A_ : List[Any] = ConfigTester(self , 
config_class=lowercase , has_text_modality=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(lowercase ) A_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Optional[Any] = [*signature.parameters.keys()] A_ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" def check_hidden_states_output(lowercase , lowercase , lowercase ): A_ : List[Any] = model_class(lowercase ) A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase ) A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowercase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[Any] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : int = layer_type A_ : Tuple = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : Any = True check_hidden_states_output(lowercase , lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ): A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase ) A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple() def recursive_check(lowercase , lowercase ): if isinstance(lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ): recursive_check(lowercase , lowercase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowercase , lowercase ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(lowercase , lowercase ) for model_class in self.all_model_classes: A_ : Dict = model_class(lowercase ) A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase ) A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : Any = self._prepare_for_class(lowercase , lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def UpperCamelCase ( ): '''simple docstring''' A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A_ : int = self.default_image_processor A_ : List[str] = prepare_img() A_ : Any = image_processor(images=lowercase , return_tensors='tf' ) # forward pass A_ : Tuple = model(**lowercase , training=lowercase ) # verify the logits A_ : int = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase ) A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
70
0
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
713
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Compute (a * b) % modulus with double-and-add, reducing intermediates modulo `modulus`."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
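A quick equivalence check for the two helpers above (hand-verified; the original file reused one obfuscated name for both, so the second definition shadowed the first — they are renamed descriptively here):

assert binary_multiply(13, 11) == 13 * 11 == 143
assert binary_mod_multiply(13, 11, 7) == (13 * 11) % 7 == 3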
70
0
import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = ['''image_processor''', '''tokenizer'''] lowerCamelCase_ = '''AutoImageProcessor''' lowerCamelCase_ = '''AutoTokenizer''' def __init__( self , lowercase=None , lowercase=None , **lowercase ): """simple docstring""" A_ : Any = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , __UpperCamelCase , ) A_ : str = kwargs.pop('feature_extractor' ) A_ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(__UpperCamelCase , __UpperCamelCase ) A_ : Union[str, Any] = self.image_processor A_ : Any = False def __call__( self , *lowercase , **lowercase ): """simple docstring""" if self._in_target_context_manager: return self.current_processor(*__UpperCamelCase , **__UpperCamelCase ) A_ : Optional[Any] = kwargs.pop('images' , __UpperCamelCase ) A_ : Dict = kwargs.pop('text' , __UpperCamelCase ) if len(__UpperCamelCase ) > 0: A_ : Optional[Any] = args[0] A_ : Dict = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: A_ : List[str] = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase ) if text is not None: A_ : Optional[Any] = self.tokenizer(__UpperCamelCase , **__UpperCamelCase ) if text is None: return inputs elif images is None: return encodings else: A_ : str = encodings['input_ids'] return inputs def lowerCAmelCase_ ( self , *lowercase , **lowercase ): """simple docstring""" return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase_ ( self , *lowercase , **lowercase ): """simple docstring""" return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase ) @contextmanager def lowerCAmelCase_ ( self ): """simple docstring""" warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your images inputs, or in a separate call.' 
) A_ : Dict = True A_ : str = self.tokenizer yield A_ : int = self.image_processor A_ : Optional[Any] = False def lowerCAmelCase_ ( self , lowercase , lowercase=False , lowercase=None ): """simple docstring""" if added_vocab is None: A_ : Optional[int] = self.tokenizer.get_added_vocab() A_ : Tuple = {} while tokens: A_ : List[Any] = re.search(r'<s_(.*?)>' , __UpperCamelCase , re.IGNORECASE ) if start_token is None: break A_ : List[str] = start_token.group(1 ) A_ : Dict = re.search(rF'''</s_{key}>''' , __UpperCamelCase , re.IGNORECASE ) A_ : Dict = start_token.group() if end_token is None: A_ : int = tokens.replace(__UpperCamelCase , '' ) else: A_ : Dict = end_token.group() A_ : Optional[int] = re.escape(__UpperCamelCase ) A_ : str = re.escape(__UpperCamelCase ) A_ : Union[str, Any] = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , __UpperCamelCase , re.IGNORECASE ) if content is not None: A_ : List[Any] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node A_ : List[str] = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase ) if value: if len(__UpperCamelCase ) == 1: A_ : List[str] = value[0] A_ : int = value else: # leaf nodes A_ : str = [] for leaf in content.split(r'<sep/>' ): A_ : List[str] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": A_ : Tuple = leaf[1:-2] # for categorical special tokens output[key].append(__UpperCamelCase ) if len(output[key] ) == 1: A_ : List[Any] = output[key][0] A_ : str = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase ) if len(__UpperCamelCase ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def lowerCAmelCase_ ( self ): """simple docstring""" warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __UpperCamelCase , ) return self.image_processor_class @property def lowerCAmelCase_ ( self ): """simple docstring""" warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __UpperCamelCase , ) return self.image_processor
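For orientation, an illustrative (hypothetical) input/output pair for the tag parser above — the Donut-style format it walks with those `<s_...>`/`</s_...>` regexes:

# tokens = "<s_menu><s_name>Latte</s_name><s_price>$5</s_price></s_menu>"
# processor.tokenajson(tokens)
# -> {"menu": {"name": "Latte", "price": "$5"}}
# Leaf values separated by <sep/> come back as Python lists.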
714
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
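The comprehension uses the closed form h(n) = n(2n − 1) and starts at n = 0, so the first entry is 0:

assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]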
70
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = GPTaTokenizer lowerCamelCase_ = GPTaTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = {"""add_prefix_space""": True} lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : Optional[int] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] A_ : int = dict(zip(lowercase , range(len(lowercase ) ) ) ) A_ : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A_ : Any = {'unk_token': '<unk>'} A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowercase ) ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = 'lower newer' A_ : Optional[Any] = 'lower newer' return input_text, output_text def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ : str = 'lower newer' A_ : Tuple = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] A_ : Any = tokenizer.tokenize(lowercase , add_prefix_space=lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : str = tokens + [tokenizer.unk_token] A_ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : List[str] = self.get_tokenizer() A_ : Dict = self.get_rust_tokenizer(add_prefix_space=lowercase ) A_ : Union[str, Any] = 'lower newer' # Testing tokenization A_ : int = tokenizer.tokenize(lowercase , add_prefix_space=lowercase ) A_ : Union[str, Any] = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing conversion to ids without special tokens A_ : Any = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase ) A_ : Optional[Any] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing conversion to ids with special tokens A_ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=lowercase ) A_ : Optional[int] = tokenizer.encode(lowercase , add_prefix_space=lowercase ) A_ : int = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing the unknown token A_ : str = tokens + [rust_tokenizer.unk_token] 
A_ : Dict = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def lowerCAmelCase_ ( self , *lowercase , **lowercase ): """simple docstring""" pass def lowerCAmelCase_ ( self , lowercase=1_5 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) # Simple input A_ : List[Any] = 'This is a simple input' A_ : int = ['This is a simple input 1', 'This is a simple input 2'] A_ : Optional[Any] = ('This is a simple input', 'This is a pair') A_ : Union[str, Any] = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' ) # Simple input self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' ) # Simple input self.assertRaises( lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , ) # Pair input self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' ) # Pair input self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' ) # Pair input self.assertRaises( lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input A_ : int = 'This is a simple input' A_ : List[Any] = ['This is a simple input looooooooong', 'This is a simple input'] A_ : Optional[int] = ('This is a simple input', 'This is a pair') A_ : Optional[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] A_ : str = tokenizer.pad_token_id A_ : Tuple = tokenizer(lowercase , padding='max_length' , max_length=3_0 , return_tensors='np' ) A_ : str = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' ) A_ : Union[str, Any] = tokenizer(*lowercase , padding='max_length' , max_length=6_0 , return_tensors='np' ) A_ : int = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # 
short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = '$$$' A_ : str = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase , add_bos_token=lowercase ) A_ : Tuple = 'This is a simple input' A_ : str = ['This is a simple input 1', 'This is a simple input 2'] A_ : Dict = tokenizer.bos_token_id A_ : int = tokenizer(lowercase ) A_ : Optional[int] = tokenizer(lowercase ) self.assertEqual(out_s.input_ids[0] , lowercase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) A_ : Dict = tokenizer.decode(out_s.input_ids ) A_ : List[str] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , lowercase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = [self.get_tokenizer(do_lower_case=lowercase , add_bos_token=lowercase )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): A_ : Optional[Any] = 'Encode this.' A_ : int = 'This one too please.' A_ : Tuple = tokenizer.encode(lowercase , add_special_tokens=lowercase ) encoded_sequence += tokenizer.encode(lowercase , add_special_tokens=lowercase ) A_ : Union[str, Any] = tokenizer.encode_plus( lowercase , lowercase , add_special_tokens=lowercase , return_special_tokens_mask=lowercase , ) A_ : Tuple = encoded_sequence_dict['input_ids'] A_ : Dict = encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(lowercase ) , len(lowercase ) ) A_ : Tuple = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase ) ] A_ : Optional[int] = [x for x in filtered_sequence if x is not None] self.assertEqual(lowercase , lowercase ) @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowercase ) A_ : List[Any] = 'A photo of a cat' A_ : Tuple = tokenizer.encode( lowercase , ) self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('test_opt' ) A_ : int = AutoTokenizer.from_pretrained('./test_opt' ) A_ : Optional[int] = tokenizer.encode( lowercase , ) self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=lowercase ) A_ : Optional[int] = 'A photo of a cat' A_ : Union[str, Any] = tokenizer.encode( lowercase , ) # Same as above self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowercase ) A_ : List[str] = 'bos' A_ : List[str] = tokenizer.get_vocab()['bos'] A_ : List[str] = 'A photo of a cat' A_ : int = tokenizer.encode( lowercase , ) # We changed the bos token self.assertEqual(lowercase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('./tok' ) A_ : str = AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) A_ : str = tokenizer.encode( lowercase , ) self.assertEqual(lowercase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
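One fixture detail worth flagging: "\u0120" (rendered "Ġ") is GPT-2's byte-level marker for a leading space, which is why the toy merges above produce "\u0120low" + "er" for " lower". Inside the test class the check would read roughly like this (a sketch relying on the setUp fixtures):

# tok = self.get_tokenizer()  # GPTaTokenizer over the toy vocab/merges
# assert tok.tokenize(" lower", add_prefix_space=False) == ["\u0120low", "er"]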
715
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Prim's algorithm should recover the known minimum spanning tree."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    # Build an undirected adjacency list: record each edge in both directions.
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    # Each expected edge must appear in the result in either orientation.
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
70
0
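The padding assertions in the tokenizer tests above all hinge on GPT-2 shipping without a pad token. A minimal sketch of that behaviour, assuming access to the stock "gpt2" checkpoint (the batch strings and max_length here are illustrative, not taken from the tests):

from transformers import GPT2Tokenizer

# GPT-2 defines no pad token; register one explicitly so that both
# padding="max_length" and padding=True (longest-in-batch) work.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<pad>")

batch = ["a short input", "a noticeably longer input for padding"]
fixed = tokenizer(batch, padding="max_length", max_length=16, return_tensors="np")
dynamic = tokenizer(batch, padding=True, return_tensors="np")

assert fixed["input_ids"].shape[-1] == 16    # padded to the requested length
assert 0 in dynamic["attention_mask"][0]     # the shorter slice carries padding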
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
716
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
70
0
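The Accelerate CLI above dispatches on args.func, which each subcommand binds when it registers itself. The same pattern in miniature, with a made-up "hello" command standing in for the real ones:

from argparse import ArgumentParser


def hello_command_parser(subparsers):
    # Each command registers its own subparser and binds a handler via set_defaults.
    parser = subparsers.add_parser("hello")
    parser.add_argument("--name", default="world")
    parser.set_defaults(func=lambda args: print(f"hello {args.name}"))


parser = ArgumentParser("demo", usage="demo <command> [<args>]")
subparsers = parser.add_subparsers(help="demo command helpers")
hello_command_parser(subparsers)

args = parser.parse_args(["hello", "--name", "accelerate"])
args.func(args)  # prints: hello accelerate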
import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCamelCase ( __lowercase : List[str] ): '''simple docstring''' if "img_encoder.pos_embed" in name: A_ : Union[str, Any] = name.replace('img_encoder.pos_embed' ,'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: A_ : Tuple = name.replace('img_encoder.patch_embed.proj' ,'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: A_ : Union[str, Any] = name.replace('img_encoder.patch_embed.norm' ,'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: A_ : Union[str, Any] = name.replace('img_encoder.layers' ,'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: A_ : Any = name.replace('blocks' ,'layers' ) if "attn" in name and "pre_assign" not in name: A_ : str = name.replace('attn' ,'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: A_ : Any = name.replace('proj' ,'out_proj' ) if "pre_assign_attn.attn.proj" in name: A_ : Dict = name.replace('pre_assign_attn.attn.proj' ,'pre_assign_attn.attn.out_proj' ) if "norm1" in name: A_ : List[str] = name.replace('norm1' ,'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: A_ : int = name.replace('norm2' ,'layer_norm2' ) if "img_encoder.norm" in name: A_ : Union[str, Any] = name.replace('img_encoder.norm' ,'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: A_ : Union[str, Any] = name.replace('text_encoder.token_embedding' ,'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: A_ : Tuple = name.replace('text_encoder.positional_embedding' ,'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: A_ : Any = name.replace('text_encoder.transformer.resblocks.' ,'text_model.encoder.layers.' ) if "ln_1" in name: A_ : List[Any] = name.replace('ln_1' ,'layer_norm1' ) if "ln_2" in name: A_ : int = name.replace('ln_2' ,'layer_norm2' ) if "c_fc" in name: A_ : Any = name.replace('c_fc' ,'fc1' ) if "c_proj" in name: A_ : List[str] = name.replace('c_proj' ,'fc2' ) if "text_encoder" in name: A_ : Optional[int] = name.replace('text_encoder' ,'text_model' ) if "ln_final" in name: A_ : Union[str, Any] = name.replace('ln_final' ,'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: A_ : Dict = name.replace('img_projector.linear_hidden.' ,'visual_projection.' ) if "img_projector.linear_out." in name: A_ : Tuple = name.replace('img_projector.linear_out.' ,'visual_projection.3.' ) if "text_projector.linear_hidden" in name: A_ : Dict = name.replace('text_projector.linear_hidden' ,'text_projection' ) if "text_projector.linear_out" in name: A_ : Optional[int] = name.replace('text_projector.linear_out' ,'text_projection.3' ) return name def UpperCamelCase ( __lowercase : Union[str, Any] ,__lowercase : Tuple ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A_ : Tuple = orig_state_dict.pop(__lowercase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ : Any = key.split('.' 
) A_ : Any = int(key_split[2] ), int(key_split[4] ) A_ : List[str] = config.vision_config.hidden_size if "weight" in key: A_ : Union[str, Any] = val[:dim, :] A_ : Dict = val[dim : dim * 2, :] A_ : Dict = val[-dim:, :] else: A_ : Tuple = val[:dim] A_ : Tuple = val[dim : dim * 2] A_ : Optional[Any] = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors A_ : List[Any] = key.split('.' ) A_ : Any = int(key_split[3] ) A_ : Optional[Any] = config.text_config.hidden_size if "weight" in key: A_ : List[Any] = val[:dim, :] A_ : List[str] = val[ dim : dim * 2, : ] A_ : Union[str, Any] = val[-dim:, :] else: A_ : Dict = val[:dim] A_ : List[str] = val[dim : dim * 2] A_ : List[Any] = val[-dim:] else: A_ : Dict = rename_key(__lowercase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): A_ : Optional[Any] = val.squeeze_() else: A_ : Dict = val return orig_state_dict def UpperCamelCase ( ): '''simple docstring''' A_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A_ : Dict = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw ) return im @torch.no_grad() def UpperCamelCase ( __lowercase : Dict ,__lowercase : Tuple ,__lowercase : Any="groupvit-gcc-yfcc" ,__lowercase : List[str]=False ): '''simple docstring''' A_ : List[Any] = GroupViTConfig() A_ : Optional[int] = GroupViTModel(__lowercase ).eval() A_ : Union[str, Any] = torch.load(__lowercase ,map_location='cpu' )['''model'''] A_ : List[Any] = convert_state_dict(__lowercase ,__lowercase ) A_ : Any = model.load_state_dict(__lowercase ,strict=__lowercase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__lowercase ) == 0) # verify result A_ : Optional[int] = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) A_ : Optional[int] = prepare_img() A_ : Dict = processor(text=['a photo of a cat', 'a photo of a dog'] ,images=__lowercase ,padding=__lowercase ,return_tensors='pt' ) with torch.no_grad(): A_ : Union[str, Any] = model(**__lowercase ) if model_name == "groupvit-gcc-yfcc": A_ : Union[str, Any] = torch.tensor([[13.35_23, 6.36_29]] ) elif model_name == "groupvit-gcc-redcaps": A_ : Any = torch.tensor([[16.18_73, 8.62_30]] ) else: raise ValueError(f'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image ,__lowercase ,atol=1e-3 ) processor.save_pretrained(__lowercase ) model.save_pretrained(__lowercase ) print('Successfully saved processor and model to' ,__lowercase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(__lowercase ,organization='nielsr' ) model.push_to_hub(__lowercase ,organization='nielsr' ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. 
Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _UpperCAmelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
717
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_pair = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_pair)

        # Single sequences gain [CLS] ... [SEP]; pairs gain a second [SEP].
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_pair + [
            tokenizer.sep_token_id
        ]
70
0
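The GroupViT conversion above splits fused qkv projection weights by slicing equal thirds along the first dimension. A self-contained sketch of that slicing (the hidden size of 32 is illustrative):

import torch

dim = 32                               # hidden size of one attention block
fused = torch.randn(3 * dim, dim)      # fused qkv weight as stored in the original checkpoint

query = fused[:dim, :]                 # first third   -> q_proj.weight
key = fused[dim : dim * 2, :]          # middle third  -> k_proj.weight
value = fused[-dim:, :]                # last third    -> v_proj.weight

# Concatenating the slices reconstructs the fused matrix exactly.
assert torch.equal(torch.cat([query, key, value]), fused)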
'''simple docstring''' import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib _UpperCAmelCase = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, 'critical': logging.CRITICAL, } _UpperCAmelCase = logging.WARNING def UpperCamelCase ( ): '''simple docstring''' A_ : List[str] = os.getenv('DATASETS_VERBOSITY' ,snake_case_ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f'''Unknown option DATASETS_VERBOSITY={env_level_str}, ''' f'''has to be one of: { ', '.join(log_levels.keys() ) }''' ) return _default_log_level def UpperCamelCase ( ): '''simple docstring''' return __name__.split('.' )[0] def UpperCamelCase ( ): '''simple docstring''' return logging.getLogger(_get_library_name() ) def UpperCamelCase ( ): '''simple docstring''' A_ : str = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def UpperCamelCase ( ): '''simple docstring''' A_ : Any = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def UpperCamelCase ( __lowercase : Optional[str] = None ): '''simple docstring''' if name is None: A_ : Dict = _get_library_name() return logging.getLogger(snake_case_ ) def UpperCamelCase ( ): '''simple docstring''' return _get_library_root_logger().getEffectiveLevel() def UpperCamelCase ( __lowercase : int ): '''simple docstring''' _get_library_root_logger().setLevel(snake_case_ ) def UpperCamelCase ( ): '''simple docstring''' return set_verbosity(snake_case_ ) def UpperCamelCase ( ): '''simple docstring''' return set_verbosity(snake_case_ ) def UpperCamelCase ( ): '''simple docstring''' return set_verbosity(snake_case_ ) def UpperCamelCase ( ): '''simple docstring''' return set_verbosity(snake_case_ ) def UpperCamelCase ( ): '''simple docstring''' A_ : Tuple = False def UpperCamelCase ( ): '''simple docstring''' A_ : Tuple = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class UpperCAmelCase : '''simple docstring''' def __init__( self , *lowercase , **lowercase ): # pylint: disable=unused-argument """simple docstring""" A_ : List[Any] = args[0] if args else None def __iter__( self ): """simple docstring""" return iter(self._iterator ) def __getattr__( self , lowercase ): """simple docstring""" def empty_fn(*lowercase , **lowercase ): # pylint: disable=unused-argument return return empty_fn def __enter__( self ): """simple docstring""" return self def __exit__( self , lowercase , lowercase , lowercase ): """simple docstring""" return _UpperCAmelCase = True class UpperCAmelCase : '''simple docstring''' def __call__( self , *lowercase , lowercase=False , **lowercase ): """simple docstring""" if _tqdm_active and not disable: return tqdm_lib.tqdm(*lowercase , **lowercase ) else: return EmptyTqdm(*lowercase , **lowercase ) def lowerCAmelCase_ ( self , *lowercase , **lowercase ): """simple docstring""" A_ : List[Any] = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowercase , **lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" if _tqdm_active: return tqdm_lib.tqdm.get_lock() _UpperCAmelCase = _tqdm_cls() def UpperCamelCase ( ): '''simple docstring''' global _tqdm_active return bool(_tqdm_active ) def UpperCamelCase ( ): '''simple docstring''' global _tqdm_active A_ : str = True 
def UpperCamelCase ( ): '''simple docstring''' global _tqdm_active A_ : Any = False
718
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random witnesses."""
    s = num - 1
    t = 0
    # Factor num - 1 as s * 2^t with s odd.
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Trial division by small primes, falling back to Miller-Rabin."""
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Sample random keysize-bit integers until one passes the primality check."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
70
0
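A quick exercise of the primality helpers defined above; Miller-Rabin never rejects a true prime, and each of the 5 random witnesses catches a composite with probability at least 3/4, so the asserts below are safe:

# Sanity checks against is_prime_low_num / generate_large_prime from the snippet above.
assert is_prime_low_num(997)          # found directly in the low-primes table
assert is_prime_low_num(104729)       # the 10000th prime; falls through to rabin_miller
assert not is_prime_low_num(104730)   # even composite, rejected by trial division

prime = generate_large_prime(keysize=128)
print(prime.bit_length())             # 128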
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = 1 A_ : str = 3 A_ : Union[str, Any] = (3_2, 3_2) A_ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowercase ) return image @property def lowerCAmelCase_ ( self ): """simple docstring""" torch.manual_seed(0 ) A_ : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=lowercase , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , ) return model @property def lowerCAmelCase_ ( self ): """simple docstring""" torch.manual_seed(0 ) A_ : List[Any] = AutoencoderKL( block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) return model @property def lowerCAmelCase_ ( self ): """simple docstring""" torch.manual_seed(0 ) A_ : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , ) return CLIPTextModel(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator A_ : Union[str, Any] = self.dummy_cond_unet_upscale A_ : str = DDPMScheduler() A_ : List[Any] = DDIMScheduler(prediction_type='v_prediction' ) A_ : List[Any] = self.dummy_vae A_ : Union[str, Any] = self.dummy_text_encoder A_ : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A_ : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : List[str] = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk A_ : Dict = StableDiffusionUpscalePipeline( unet=lowercase , low_res_scheduler=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , max_noise_level=3_5_0 , ) A_ : Optional[int] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : List[Any] = 'A painting of a squirrel eating a burger' A_ : Optional[Any] = torch.Generator(device=lowercase ).manual_seed(0 ) A_ : List[Any] = sd_pipe( [prompt] , image=lowercase , generator=lowercase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , ) A_ : List[Any] = output.images A_ : Union[str, Any] = torch.Generator(device=lowercase 
).manual_seed(0 ) A_ : Any = sd_pipe( [prompt] , image=lowercase , generator=lowercase , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , return_dict=lowercase , )[0] A_ : Optional[int] = image[0, -3:, -3:, -1] A_ : str = image_from_tuple[0, -3:, -3:, -1] A_ : List[str] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) A_ : List[str] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = 'cpu' # ensure determinism for the device-dependent torch.Generator A_ : Optional[Any] = self.dummy_cond_unet_upscale A_ : Optional[Any] = DDPMScheduler() A_ : Dict = DDIMScheduler(prediction_type='v_prediction' ) A_ : Union[str, Any] = self.dummy_vae A_ : int = self.dummy_text_encoder A_ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A_ : Any = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Dict = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((6_4, 6_4) ) # make sure here that pndm scheduler skips prk A_ : Tuple = StableDiffusionUpscalePipeline( unet=lowercase , low_res_scheduler=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , max_noise_level=3_5_0 , ) A_ : Optional[Any] = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : str = 'A painting of a squirrel eating a burger' A_ : Optional[int] = sd_pipe( 2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , ) A_ : Tuple = output.images assert image.shape[0] == 2 A_ : Any = torch.Generator(device=lowercase ).manual_seed(0 ) A_ : Any = sd_pipe( [prompt] , image=lowercase , generator=lowercase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='np' , ) A_ : Any = output.images assert image.shape[0] == 2 @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = self.dummy_cond_unet_upscale A_ : str = DDPMScheduler() A_ : Union[str, Any] = DDIMScheduler(prediction_type='v_prediction' ) A_ : Optional[Any] = self.dummy_vae A_ : List[Any] = self.dummy_text_encoder A_ : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) A_ : int = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0] A_ : Optional[int] = Image.fromarray(np.uinta(lowercase ) ).convert('RGB' ).resize((6_4, 6_4) ) # put models in fp16, except vae as it overflows in fp16 A_ : str = unet.half() A_ : Any = text_encoder.half() # make sure here that pndm scheduler skips prk A_ : List[str] = StableDiffusionUpscalePipeline( unet=lowercase , low_res_scheduler=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , max_noise_level=3_5_0 , ) A_ : Dict = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) A_ : List[Any] = 'A painting of a squirrel eating a burger' A_ : int = torch.manual_seed(0 ) A_ : Optional[Any] = sd_pipe( [prompt] , image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='np' , ).images A_ : Union[str, Any] = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) 
@slow @require_torch_gpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) A_ : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat.npy' ) A_ : Dict = 'stabilityai/stable-diffusion-x4-upscaler' A_ : Dict = StableDiffusionUpscalePipeline.from_pretrained(lowercase ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : List[Any] = 'a cat sitting on a park bench' A_ : int = torch.manual_seed(0 ) A_ : Optional[int] = pipe( prompt=lowercase , image=lowercase , generator=lowercase , output_type='np' , ) A_ : Dict = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 1E-3 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) A_ : Union[str, Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale' '/upsampled_cat_fp16.npy' ) A_ : Optional[int] = 'stabilityai/stable-diffusion-x4-upscaler' A_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained( lowercase , torch_dtype=torch.floataa , ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing() A_ : List[str] = 'a cat sitting on a park bench' A_ : str = torch.manual_seed(0 ) A_ : Any = pipe( prompt=lowercase , image=lowercase , generator=lowercase , output_type='np' , ) A_ : Union[str, Any] = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def lowerCAmelCase_ ( self ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() A_ : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-upscale/low_res_cat.png' ) A_ : str = 'stabilityai/stable-diffusion-x4-upscaler' A_ : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained( lowercase , torch_dtype=torch.floataa , ) pipe.to(lowercase ) pipe.set_progress_bar_config(disable=lowercase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() A_ : Tuple = 'a cat sitting on a park bench' A_ : List[Any] = torch.manual_seed(0 ) A_ : Optional[Any] = pipe( prompt=lowercase , image=lowercase , generator=lowercase , num_inference_steps=5 , output_type='np' , ) A_ : Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 1_0**9
719
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
0
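The __init__ above defers heavy imports until an attribute is actually used. The same idea in plain importlib, as a minimal sketch (class and names are made up for illustration):

import importlib


class LazyAttr:
    """Resolve an attribute from a module only on first access."""

    def __init__(self, module_name: str, attr: str):
        self._module_name = module_name
        self._attr = attr
        self._value = None

    def resolve(self):
        # The module is imported lazily and the result cached.
        if self._value is None:
            module = importlib.import_module(self._module_name)
            self._value = getattr(module, self._attr)
        return self._value


# Nothing is imported until resolve() is called.
sqrt = LazyAttr("math", "sqrt")
print(sqrt.resolve()(2.0))  # 1.4142...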
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
720
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
70
0
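The Donut test above checks the tag-sequence-to-JSON conversion. A drastically simplified, hypothetical stand-in for DonutProcessor.token2json shows the underlying format; the real implementation additionally handles special tokens and malformed sequences:

import re


def tags_to_json(seq: str):
    # Simplified sketch: <s_key>value</s_key> becomes {"key": value};
    # nested tags become a list of sub-dicts split on <sep/>.
    out = {}
    for key, body in re.findall(r"<s_(.+?)>(.*?)</s_\1>", seq):
        if "<s_" in body:
            out[key] = [tags_to_json(part) for part in body.split("<sep/>")]
        else:
            out[key] = body
    return out


sequence = "<s_name>John Doe</s_name><s_nicknames><s_nickname>JD</s_nickname></s_nicknames>"
print(tags_to_json(sequence))  # {'name': 'John Doe', 'nicknames': [{'nickname': 'JD'}]}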
_UpperCAmelCase = """Tobias Carryer""" from time import time class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase=int(time() ) ): # noqa: B008 """simple docstring""" A_ : Dict = multiplier A_ : List[str] = increment A_ : Any = modulo A_ : Union[str, Any] = seed def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. _UpperCAmelCase = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31) while True: print(lcg.next_number())
721
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-based (row, column) coordinates of a letter in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based position (index1, index2) in the square."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")  # 25-letter square: j folds into i

        # Row coordinates go in row 0, column coordinates in row 1.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Flatten row-major (all rows, then all columns) and re-pair.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # Lay consecutive coordinate pairs into a flat array, then split back
        # into the original row/column halves.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
70
0
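A round-trip check for the BifidCipher restored above; note that encode folds "j" into "i", so the trip is lossy for words containing j:

cipher = BifidCipher()
assert cipher.decode(cipher.encode("testmessage")) == "testmessage"
assert cipher.decode(cipher.encode("jazz")) == "iazz"  # 'j' cannot survive the 25-letter square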
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _UpperCAmelCase = { """vocab_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt""" ), """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""", """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""", """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""", }, """tokenizer_file""": { """google/realm-cc-news-pretrained-embedder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont""" ), """google/realm-cc-news-pretrained-encoder""": ( """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-scorer""": ( """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json""" ), """google/realm-cc-news-pretrained-openqa""": ( """https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json""" ), """google/realm-orqa-nq-openqa""": ( """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-nq-reader""": ( """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-openqa""": ( """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json""" ), """google/realm-orqa-wq-reader""": ( """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json""" ), }, } _UpperCAmelCase = { """google/realm-cc-news-pretrained-embedder""": 512, """google/realm-cc-news-pretrained-encoder""": 512, """google/realm-cc-news-pretrained-scorer""": 512, """google/realm-cc-news-pretrained-openqa""": 512, """google/realm-orqa-nq-openqa""": 512, """google/realm-orqa-nq-reader""": 512, """google/realm-orqa-wq-openqa""": 512, """google/realm-orqa-wq-reader""": 512, } _UpperCAmelCase = { """google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True}, """google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-openqa""": {"""do_lower_case""": True}, """google/realm-orqa-nq-reader""": {"""do_lower_case""": True}, """google/realm-orqa-wq-openqa""": 
{"""do_lower_case""": True}, """google/realm-orqa-wq-reader""": {"""do_lower_case""": True}, } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = RealmTokenizer def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ): """simple docstring""" super().__init__( lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , ) A_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , lowercase ) != do_lower_case or normalizer_state.get('strip_accents' , lowercase ) != strip_accents or normalizer_state.get('handle_chinese_chars' , lowercase ) != tokenize_chinese_chars ): A_ : Optional[int] = getattr(lowercase , normalizer_state.pop('type' ) ) A_ : Optional[Any] = do_lower_case A_ : Union[str, Any] = strip_accents A_ : Optional[int] = tokenize_chinese_chars A_ : Union[str, Any] = normalizer_class(**lowercase ) A_ : Tuple = do_lower_case def lowerCAmelCase_ ( self , lowercase , **lowercase ): """simple docstring""" A_ : Optional[int] = PaddingStrategy.MAX_LENGTH A_ : Optional[int] = text A_ : int = kwargs.pop('text_pair' , lowercase ) A_ : List[Any] = kwargs.pop('return_tensors' , lowercase ) A_ : Any = { 'input_ids': [], 'attention_mask': [], 'token_type_ids': [], } for idx, candidate_text in enumerate(lowercase ): if batch_text_pair is not None: A_ : Union[str, Any] = batch_text_pair[idx] else: A_ : List[str] = None A_ : Dict = super().__call__(lowercase , lowercase , return_tensors=lowercase , **lowercase ) A_ : Tuple = encoded_candidates.get('input_ids' ) A_ : Dict = encoded_candidates.get('attention_mask' ) A_ : str = encoded_candidates.get('token_type_ids' ) if encoded_input_ids is not None: output_data["input_ids"].append(lowercase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(lowercase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(lowercase ) A_ : List[Any] = {key: item for key, item in output_data.items() if len(lowercase ) != 0} return BatchEncoding(lowercase , tensor_type=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase=None ): """simple docstring""" A_ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase_ ( self , lowercase , lowercase = None ): """simple docstring""" A_ : Union[str, Any] = [self.sep_token_id] A_ : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self , lowercase , lowercase = None ): """simple docstring""" A_ : str = self._tokenizer.model.save(lowercase , name=lowercase ) return tuple(lowercase )
700
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """Return the least maximum cuboid side M such that more than ``limit``
    cuboids with sides up to M have an integer shortest surface route."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
70
0
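The counting above relies on the unfolded-box identity: for a cuboid a x b x c with a <= b <= c, the shortest surface path is sqrt((a + b)^2 + c^2). A direct check (the 3 x 5 x 6 cuboid is the classic integer example):

from math import sqrt


def shortest_route(a: int, b: int, c: int) -> float:
    # Unfold the box: the best route pairs the two shortest sides
    # against the longest one, minimising the 2ab cross term.
    a, b, c = sorted((a, b, c))
    return sqrt((a + b) ** 2 + c**2)


assert shortest_route(3, 5, 6) == 10.0  # sqrt(8^2 + 6^2) = 10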
import math import random from typing import Any from .hill_climbing import SearchProblem def UpperCamelCase ( __lowercase ,__lowercase = True ,__lowercase = math.inf ,__lowercase = -math.inf ,__lowercase = math.inf ,__lowercase = -math.inf ,__lowercase = False ,__lowercase = 1_00 ,__lowercase = 0.01 ,__lowercase = 1 ,): '''simple docstring''' A_ : str = False A_ : Any = search_prob A_ : Optional[Any] = start_temperate A_ : str = [] A_ : List[str] = 0 A_ : Optional[int] = None while not search_end: A_ : Optional[Any] = current_state.score() if best_state is None or current_score > best_state.score(): A_ : str = current_state scores.append(__lowercase ) iterations += 1 A_ : Optional[Any] = None A_ : Any = current_state.get_neighbors() while ( next_state is None and neighbors ): # till we do not find a neighbor that we can move to A_ : Optional[int] = random.randint(0 ,len(__lowercase ) - 1 ) # picking a random neighbor A_ : Union[str, Any] = neighbors.pop(__lowercase ) A_ : List[Any] = picked_neighbor.score() - current_score if ( picked_neighbor.x > max_x or picked_neighbor.x < min_x or picked_neighbor.y > max_y or picked_neighbor.y < min_y ): continue # neighbor outside our bounds if not find_max: A_ : Union[str, Any] = change * -1 # in case we are finding minimum if change > 0: # improves the solution A_ : Union[str, Any] = picked_neighbor else: A_ : Optional[Any] = (math.e) ** ( change / current_temp ) # probability generation function if random.random() < probability: # random number within probability A_ : List[str] = picked_neighbor A_ : Dict = current_temp - (current_temp * rate_of_decrease) if current_temp < threshold_temp or next_state is None: # temperature below threshold, or could not find a suitable neighbor A_ : str = True else: A_ : List[Any] = next_state if visualization: from matplotlib import pyplot as plt plt.plot(range(__lowercase ) ,__lowercase ) plt.xlabel('Iterations' ) plt.ylabel('Function values' ) plt.show() return best_state if __name__ == "__main__": def UpperCamelCase ( __lowercase ,__lowercase ): '''simple docstring''' return (x**2) + (y**2) # starting the problem with initial coordinates (12, 47) _UpperCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _UpperCAmelCase = simulated_annealing( prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) # starting the problem with initial coordinates (12, 47) _UpperCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa) _UpperCAmelCase = simulated_annealing( prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True ) print( """The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """ F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}""" ) def UpperCamelCase ( __lowercase ,__lowercase ): '''simple docstring''' return (3 * x**2) - (6 * y) _UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _UpperCAmelCase = simulated_annealing(prob, find_max=False, visualization=True) print( """The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """ F"""{local_min.score()}""" ) _UpperCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa) _UpperCAmelCase = simulated_annealing(prob, find_max=True, visualization=True) print( """The maximum score for f(x, y) = 3*x^2 - 6*y found via 
hill climbing: """ F"""{local_min.score()}""" )
701
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ): """simple docstring""" super().__init__() A_ : Tuple = initial_learning_rate A_ : List[str] = warmup_steps A_ : int = power A_ : Dict = decay_schedule_fn A_ : Any = name def __call__( self , lowercase ): """simple docstring""" with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. A_ : Optional[int] = tf.cast(lowercase , tf.floataa ) A_ : int = tf.cast(self.warmup_steps , tf.floataa ) A_ : Optional[int] = global_step_float / warmup_steps_float A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,): '''simple docstring''' A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,) if num_warmup_steps: A_ : Tuple = WarmUp( initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,) if weight_decay_rate > 0.0: A_ : Union[str, Any] = AdamWeightDecay( learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,) else: A_ : Dict = tf.keras.optimizers.Adam( learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ): """simple docstring""" super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase ) A_ : Dict = weight_decay_rate A_ : Union[str, Any] = include_in_weight_decay A_ : str = exclude_from_weight_decay @classmethod def lowerCAmelCase_ ( cls , lowercase ): """simple docstring""" A_ : Tuple = {'WarmUp': WarmUp} return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase ) A_ : Optional[Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Dict = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ , A_ : Optional[int] = list(zip(*lowercase ) ) return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} A_ : List[str] = apply_state or {} A_ : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: A_ : Dict = self._fallback_apply_state(lowercase , lowercase ) A_ : int = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase , lowercase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase , lowercase ) is not None: return False return True class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self ): """simple docstring""" A_ : int = [] A_ : Optional[int] = None @property def lowerCAmelCase_ ( self ): """simple 
docstring""" if self._accum_steps is None: A_ : int = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , lowercase ): """simple docstring""" if not self._gradients: A_ : Optional[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' ) for accum_gradient, gradient in zip(self._gradients , lowercase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase ) self._accum_steps.assign_add(1 ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase ) )
70
0
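The acceptance rule used by the simulated annealing above, e^(delta / T), in isolation: worse moves are accepted often at high temperature and almost never once T has cooled. A small sketch of that schedule (the delta of -1.0 and the temperatures are illustrative):

import math
import random


def accept(delta: float, temperature: float) -> bool:
    # Improving moves are always taken; worsening moves (delta < 0 when
    # maximising) pass with probability e^(delta / T).
    return delta > 0 or random.random() < math.e ** (delta / temperature)


random.seed(0)
for temp in (100.0, 1.0, 0.01):
    rate = sum(accept(-1.0, temp) for _ in range(10_000)) / 10_000
    print(f"T={temp:>6}: worse move accepted {rate:.1%} of the time")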
from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Built-in voltage of a pn junction: V_bi = (kT / q) * ln(Nd * Na / ni^2)."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
702
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : Any = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Tuple = 
AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[str] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) 
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
70
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( __lowercase : List[Any] ): '''simple docstring''' A_ : str = DPTConfig(embedding_type='hybrid' ) if "large" in checkpoint_url: A_ : Any = 10_24 A_ : Tuple = 40_96 A_ : Optional[Any] = 24 A_ : Optional[int] = 16 A_ : str = [5, 11, 17, 23] A_ : Any = [2_56, 5_12, 10_24, 10_24] A_ : Optional[Any] = (1, 3_84, 3_84) if "nyu" or "midas" in checkpoint_url: A_ : Dict = 7_68 A_ : Union[str, Any] = [1, 1, 1, 0.5] A_ : Dict = [2_56, 5_12, 7_68, 7_68] A_ : Tuple = 1_50 A_ : int = 16 A_ : List[str] = (1, 3_84, 3_84) A_ : Any = False A_ : str = 'project' if "ade" in checkpoint_url: A_ : List[Any] = True A_ : Union[str, Any] = 7_68 A_ : str = [1, 1, 1, 0.5] A_ : List[str] = 1_50 A_ : Optional[int] = 16 A_ : str = 'huggingface/label-files' A_ : List[Any] = 'ade20k-id2label.json' A_ : str = json.load(open(cached_download(hf_hub_url(__lowercase ,__lowercase ,repo_type='dataset' ) ) ,'r' ) ) A_ : Tuple = {int(__lowercase ): v for k, v in idalabel.items()} A_ : str = idalabel A_ : Optional[int] = {v: k for k, v in idalabel.items()} A_ : Union[str, Any] = [1, 1_50, 4_80, 4_80] return config, expected_shape def UpperCamelCase ( __lowercase : Union[str, Any] ): '''simple docstring''' A_ : int = ['pretrained.model.head.weight', 'pretrained.model.head.bias'] for k in ignore_keys: state_dict.pop(__lowercase ,__lowercase ) def UpperCamelCase ( __lowercase : List[Any] ): '''simple docstring''' if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): A_ : Union[str, Any] = name.replace('pretrained.model' ,'dpt.encoder' ) if "pretrained.model" in name: A_ : Dict = name.replace('pretrained.model' ,'dpt.embeddings' ) if "patch_embed" in name: A_ : Optional[Any] = name.replace('patch_embed' ,'' ) if "pos_embed" in name: A_ : int = name.replace('pos_embed' ,'position_embeddings' ) if "attn.proj" in name: A_ : str = name.replace('attn.proj' ,'attention.output.dense' ) if "proj" in name and "project" not in name: A_ : Optional[int] = name.replace('proj' ,'projection' ) if "blocks" in name: A_ : Dict = name.replace('blocks' ,'layer' ) if "mlp.fc1" in name: A_ : Optional[int] = name.replace('mlp.fc1' ,'intermediate.dense' ) if "mlp.fc2" in name: A_ : int = name.replace('mlp.fc2' ,'output.dense' ) if "norm1" in name and "backbone" not in name: A_ : Tuple = name.replace('norm1' ,'layernorm_before' ) if "norm2" in name and "backbone" not in name: A_ : str = name.replace('norm2' ,'layernorm_after' ) if "scratch.output_conv" in name: A_ : Any = name.replace('scratch.output_conv' ,'head' ) if "scratch" in name: A_ : Optional[int] = name.replace('scratch' ,'neck' ) if "layer1_rn" in name: A_ : Dict = name.replace('layer1_rn' ,'convs.0' ) if "layer2_rn" in name: A_ : List[Any] = name.replace('layer2_rn' ,'convs.1' ) if "layer3_rn" in name: A_ : List[Any] = name.replace('layer3_rn' ,'convs.2' ) if "layer4_rn" in name: A_ : Union[str, Any] = name.replace('layer4_rn' ,'convs.3' ) if "refinenet" in name: A_ : str = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 A_ : Optional[Any] 
= name.replace(f'''refinenet{layer_idx}''' ,f'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: A_ : Dict = name.replace('out_conv' ,'projection' ) if "resConfUnit1" in name: A_ : Union[str, Any] = name.replace('resConfUnit1' ,'residual_layer1' ) if "resConfUnit2" in name: A_ : Optional[Any] = name.replace('resConfUnit2' ,'residual_layer2' ) if "conv1" in name: A_ : List[str] = name.replace('conv1' ,'convolution1' ) if "conv2" in name: A_ : List[str] = name.replace('conv2' ,'convolution2' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: A_ : Union[str, Any] = name.replace('pretrained.act_postprocess1.0.project.0' ,'neck.reassemble_stage.readout_projects.0.0' ) if "pretrained.act_postprocess2.0.project.0" in name: A_ : Union[str, Any] = name.replace('pretrained.act_postprocess2.0.project.0' ,'neck.reassemble_stage.readout_projects.1.0' ) if "pretrained.act_postprocess3.0.project.0" in name: A_ : List[str] = name.replace('pretrained.act_postprocess3.0.project.0' ,'neck.reassemble_stage.readout_projects.2.0' ) if "pretrained.act_postprocess4.0.project.0" in name: A_ : Tuple = name.replace('pretrained.act_postprocess4.0.project.0' ,'neck.reassemble_stage.readout_projects.3.0' ) # resize blocks if "pretrained.act_postprocess1.3" in name: A_ : Dict = name.replace('pretrained.act_postprocess1.3' ,'neck.reassemble_stage.layers.0.projection' ) if "pretrained.act_postprocess1.4" in name: A_ : str = name.replace('pretrained.act_postprocess1.4' ,'neck.reassemble_stage.layers.0.resize' ) if "pretrained.act_postprocess2.3" in name: A_ : Dict = name.replace('pretrained.act_postprocess2.3' ,'neck.reassemble_stage.layers.1.projection' ) if "pretrained.act_postprocess2.4" in name: A_ : List[Any] = name.replace('pretrained.act_postprocess2.4' ,'neck.reassemble_stage.layers.1.resize' ) if "pretrained.act_postprocess3.3" in name: A_ : Optional[int] = name.replace('pretrained.act_postprocess3.3' ,'neck.reassemble_stage.layers.2.projection' ) if "pretrained.act_postprocess4.3" in name: A_ : List[str] = name.replace('pretrained.act_postprocess4.3' ,'neck.reassemble_stage.layers.3.projection' ) if "pretrained.act_postprocess4.4" in name: A_ : List[Any] = name.replace('pretrained.act_postprocess4.4' ,'neck.reassemble_stage.layers.3.resize' ) if "pretrained" in name: A_ : str = name.replace('pretrained' ,'dpt' ) if "bn" in name: A_ : Any = name.replace('bn' ,'batch_norm' ) if "head" in name: A_ : Union[str, Any] = name.replace('head' ,'head.head' ) if "encoder.norm" in name: A_ : str = name.replace('encoder.norm' ,'layernorm' ) if "auxlayer" in name: A_ : Optional[int] = name.replace('auxlayer' ,'auxiliary_head.head' ) if "backbone" in name: A_ : List[str] = name.replace('backbone' ,'backbone.bit.encoder' ) if ".." in name: A_ : Tuple = name.replace('..' ,'.' 
) if "stem.conv" in name: A_ : List[str] = name.replace('stem.conv' ,'bit.embedder.convolution' ) if "blocks" in name: A_ : Optional[Any] = name.replace('blocks' ,'layers' ) if "convolution" in name and "backbone" in name: A_ : Optional[int] = name.replace('convolution' ,'conv' ) if "layer" in name and "backbone" in name: A_ : Any = name.replace('layer' ,'layers' ) if "backbone.bit.encoder.bit" in name: A_ : Dict = name.replace('backbone.bit.encoder.bit' ,'backbone.bit' ) if "embedder.conv" in name: A_ : List[Any] = name.replace('embedder.conv' ,'embedder.convolution' ) if "backbone.bit.encoder.stem.norm" in name: A_ : Optional[Any] = name.replace('backbone.bit.encoder.stem.norm' ,'backbone.bit.embedder.norm' ) return name def UpperCamelCase ( __lowercase : Tuple ,__lowercase : str ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A_ : Dict = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) A_ : Dict = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict A_ : Dict = in_proj_weight[: config.hidden_size, :] A_ : List[Any] = in_proj_bias[: config.hidden_size] A_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A_ : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A_ : Union[str, Any] = in_proj_weight[ -config.hidden_size :, : ] A_ : List[Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase ( ): '''simple docstring''' A_ : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg' A_ : Tuple = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw ) return im @torch.no_grad() def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : str ,__lowercase : Dict ,__lowercase : List[Any] ,__lowercase : List[Any] ): '''simple docstring''' A_ : Any = get_dpt_config(__lowercase ) # load original state_dict from URL # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu") A_ : Optional[Any] = torch.load(__lowercase ,map_location='cpu' ) # remove certain keys remove_ignore_keys_(__lowercase ) # rename keys for key in state_dict.copy().keys(): A_ : List[Any] = state_dict.pop(__lowercase ) A_ : str = val # read in qkv matrices read_in_q_k_v(__lowercase ,__lowercase ) # load HuggingFace model A_ : Optional[Any] = DPTForSemanticSegmentation(__lowercase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(__lowercase ) model.load_state_dict(__lowercase ) model.eval() # Check outputs on an image A_ : Optional[int] = 4_80 if 'ade' in checkpoint_url else 3_84 A_ : Optional[Any] = DPTImageProcessor(size=__lowercase ) A_ : Any = prepare_img() A_ : Tuple = image_processor(__lowercase ,return_tensors='pt' ) # forward pass A_ : Union[str, Any] = model(**__lowercase ).logits if 'ade' in checkpoint_url else model(**__lowercase ).predicted_depth if show_prediction: A_ : Optional[Any] = ( torch.nn.functional.interpolate( outputs.unsqueeze(1 ) ,size=(image.size[1], image.size[0]) ,mode='bicubic' ,align_corners=__lowercase ,) .squeeze() .cpu() .numpy() ) Image.fromarray((prediction / prediction.max()) * 2_55 ).show() if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowercase ) if 
push_to_hub: model.push_to_hub('ybelkada/dpt-hybrid-midas' ) image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""", type=str, help="""URL of the original DPT checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) parser.add_argument( """--model_name""", default="""dpt-large""", type=str, help="""Name of the model, in case you're pushing to the hub.""", ) parser.add_argument( """--show_prediction""", action="""store_true""", ) _UpperCAmelCase = parser.parse_args() convert_dpt_checkpoint( args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction )
703
def UpperCamelCase(arr: list) -> int:
    '''simple docstring'''
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j.
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, s + 1):
        dp[0][i] = False
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either leave arr[i - 1] out ...
            dp[i][j] = dp[i - 1][j]
            # ... or include it when it fits.
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]
    # The best split puts a subset sum as close to s / 2 as possible.
    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break
    return diff
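# Illustrative usage (added example, not part of the original snippet): for
# [1, 6, 11, 5] the closest split is {1, 5, 6} vs {11}, so the minimum
# subset-sum difference is 1.
if __name__ == "__main__":
    print(UpperCamelCase([1, 6, 11, 5]))  # expected output: 1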
70
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


_UpperCAmelCase = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine"""


def get_user_input():
    '''simple docstring'''
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=_UpperCAmelCase)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=_UpperCAmelCase)
    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    '''simple docstring'''
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
704
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.activations import gelu_new, gelu_python, get_activation


@require_torch
class UpperCAmelCase(unittest.TestCase):
    '''simple docstring'''

    def test_gelu_versions(self):
        """simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        """simple docstring"""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('gelu')
        gelu10 = get_activation('gelu_10')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        """simple docstring"""
        get_activation('gelu')
        get_activation('gelu_10')
        get_activation('gelu_fast')
        get_activation('gelu_new')
        get_activation('gelu_python')
        get_activation('gelu_pytorch_tanh')
        get_activation('linear')
        get_activation('mish')
        get_activation('quick_gelu')
        get_activation('relu')
        get_activation('sigmoid')
        get_activation('silu')
        get_activation('swish')
        get_activation('tanh')
        # Unknown or non-string activation names raise a KeyError.
        with self.assertRaises(KeyError):
            get_activation('bogus')
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        """simple docstring"""
        act1 = get_activation('gelu')
        act1.a = 1
        act2 = get_activation('gelu')
        # `get_activation` must return fresh instances: the attribute set on
        # `act1` should not leak onto `act2`.
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
70
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
705
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCAmelCase = logging.get_logger(__name__) # General docstring _UpperCAmelCase = """RegNetConfig""" # Base docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = [1, 1088, 7, 7] # Image classification docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = """tabby, tabby cat""" _UpperCAmelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , ) A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[str] = self.convolution(self.padding(lowercase ) ) A_ : List[str] = self.normalization(lowercase ) A_ : List[Any] = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[int] = config.num_channels A_ : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = shape_list(lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) ) A_ : Optional[int] = self.embedder(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' ) A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" return self.normalization(self.convolution(lowercase ) , training=lowercase ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) A_ : Optional[Any] = [ tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = self.pooler(lowercase ) for layer_module in self.attention: A_ : Optional[Any] = layer_module(lowercase ) A_ : Optional[int] = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : Optional[int] = max(1 , out_channels // config.groups_width ) A_ : List[Any] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
A_ : Optional[int] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ), ] A_ : List[str] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = hidden_state for layer_module in self.layers: A_ : int = layer_module(lowercase ) A_ : Union[str, Any] = self.shortcut(lowercase ) hidden_state += residual A_ : Dict = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : int = max(1 , out_channels // config.groups_width ) A_ : Optional[int] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) A_ : List[str] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ), ] A_ : Union[str, Any] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = hidden_state for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) A_ : int = self.shortcut(lowercase ) hidden_state += residual A_ : str = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer A_ : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ), *[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : List[str] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) ) def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ): """simple docstring""" A_ : Tuple = () if 
output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A_ : Dict = hidden_states + (hidden_state,) A_ : List[Any] = stage_module(lowercase ) if output_hidden_states: A_ : Union[str, Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' lowerCamelCase_ = RegNetConfig def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[Any] = config A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' ) A_ : str = TFRegNetEncoder(lowercase , name='encoder' ) A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" A_ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase ) A_ : Optional[int] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Dict = encoder_outputs[0] A_ : List[Any] = self.pooler(lowercase ) # Change to NCHW output format have uniformity in the modules A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = RegNetConfig lowerCamelCase_ = '''regnet''' lowerCamelCase_ = '''pixel_values''' @property def lowerCAmelCase_ ( self ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _UpperCAmelCase = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : int = TFRegNetMainLayer(lowercase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : Tuple = self.regnet( pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , __A , ) class UpperCAmelCase ( __A , __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : List[Any] = config.num_labels A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' ) # classification head A_ : Union[str, Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : List[Any] = self.regnet( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] A_ : List[Any] = self.classifier[0](lowercase ) A_ : Union[str, Any] = self.classifier[1](lowercase ) A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase ) if not return_dict: A_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
70
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
706
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
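# Illustrative note (added, not part of the original file): with the lazy
# module installed into sys.modules above, an import such as
#     from transformers.models.biogpt import BioGptConfig
# only loads the configuration submodule; the torch-backed classes listed in
# _import_structure are materialized on first attribute access.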
70
0
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    '''simple docstring'''
    # A color is valid when no already-colored neighbour uses it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    '''simple docstring'''
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    '''simple docstring'''
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
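# Illustrative usage (added example, not part of the original file): a
# triangle graph is colorable with 3 colors but not with 2.
if __name__ == "__main__":
    triangle = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    print(color(triangle, 3))  # e.g. [0, 1, 2]
    print(color(triangle, 2))  # [] -- two colors are not enough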
707
def odd_even_transposition(arr: list) -> list:
    '''simple docstring'''
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between even- and odd-indexed comparison passes.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
70
0
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    '''simple docstring'''
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join([str(x) for x in a]))


if __name__ == "__main__":
    main()
708
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''wavlm''' def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): """simple docstring""" super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) A_ : List[Any] = hidden_size A_ : Tuple = feat_extract_norm A_ : Dict = feat_extract_activation A_ : Optional[Any] = list(lowercase ) A_ : Union[str, Any] = list(lowercase ) A_ : List[str] = list(lowercase ) A_ : str = conv_bias A_ : Tuple = num_buckets A_ : Union[str, Any] = max_bucket_distance A_ : int = num_conv_pos_embeddings A_ : str = num_conv_pos_embedding_groups A_ : str = len(self.conv_dim ) A_ : Tuple = num_hidden_layers A_ : Tuple = intermediate_size A_ : Optional[Any] = hidden_act A_ : Optional[Any] = num_attention_heads A_ : str = hidden_dropout A_ : Optional[int] = attention_dropout A_ : Optional[Any] = activation_dropout A_ : Optional[int] = feat_proj_dropout A_ : List[Any] = final_dropout A_ : Union[str, Any] = layerdrop A_ : Dict = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : str = num_ctc_classes A_ : Any = vocab_size A_ : str = do_stable_layer_norm A_ : int = use_weighted_layer_sum A_ : int = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : List[str] = apply_spec_augment A_ : Optional[Any] = mask_time_prob A_ : int = mask_time_length A_ : Any = mask_time_min_masks A_ : Optional[int] = mask_feature_prob A_ : Tuple = mask_feature_length # parameters for pretraining with codevector quantized representations A_ : int = num_codevectors_per_group A_ : Any = num_codevector_groups A_ : List[Any] = contrastive_logits_temperature A_ : Optional[Any] = num_negatives A_ : Optional[Any] = codevector_dim A_ : int = proj_codevector_dim A_ : int = diversity_loss_weight # ctc loss A_ : Union[str, Any] = ctc_loss_reduction A_ : Any = ctc_zero_infinity # adapter A_ : int = add_adapter A_ : Optional[Any] = adapter_kernel_size A_ : Optional[int] = adapter_stride A_ : Dict = num_adapter_layers A_ : str = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A_ : int = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A_ : Tuple = list(lowercase ) A_ : Optional[Any] = list(lowercase ) A_ : Dict = list(lowercase ) A_ : Dict = xvector_output_dim @property def lowerCAmelCase_ ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
70
0
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
709
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger() def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ): '''simple docstring''' print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase ) else: A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase ) if hidden_sizes == 1_92: A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase ) if hidden_sizes == 2_56: A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase ) if hidden_sizes == 3_84: A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase ) from_model.eval() A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval() A_ : Union[str, Any] = OrderedDict() A_ : Dict = from_model.state_dict() A_ : Tuple = list(from_model.state_dict().keys() ) A_ : str = list(our_model.state_dict().keys() ) print(len(__lowercase ) ,len(__lowercase ) ) for i in range(len(__lowercase ) ): A_ : str = weights[og_keys[i]] our_model.load_state_dict(__lowercase ) A_ : str = torch.randn((2, 3, 2_24, 2_24) ) A_ : str = from_model(__lowercase ) A_ : Optional[Any] = our_model(__lowercase ).logits assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one." A_ : List[str] = name print(__lowercase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) A_ : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ): '''simple docstring''' A_ : Dict = 'imagenet-1k-id2label.json' A_ : Optional[int] = 10_00 A_ : Optional[int] = (1, num_labels) A_ : int = 'huggingface/label-files' A_ : int = num_labels A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) ) A_ : int = {int(__lowercase ): v for k, v in idalabel.items()} A_ : List[str] = idalabel A_ : str = {v: k for k, v in idalabel.items()} A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase ) A_ : Any = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } A_ : Tuple = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 
32, 32] ,drop_path_rate=0.1 ,), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
70
0
from manim import * class UpperCAmelCase ( __A ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = Rectangle(height=0.5 , width=0.5 ) A_ : List[Any] = Rectangle(height=0.25 , width=0.25 ) A_ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) A_ : Optional[int] = [mem.copy() for i in range(6 )] A_ : Optional[int] = [mem.copy() for i in range(6 )] A_ : List[str] = VGroup(*lowercase ).arrange(lowercase , buff=0 ) A_ : Tuple = VGroup(*lowercase ).arrange(lowercase , buff=0 ) A_ : int = VGroup(lowercase , lowercase ).arrange(lowercase , buff=0 ) A_ : Optional[int] = Text('CPU' , font_size=2_4 ) A_ : Any = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowercase ) A_ : Any = [mem.copy() for i in range(4 )] A_ : List[str] = VGroup(*lowercase ).arrange(lowercase , buff=0 ) A_ : int = Text('GPU' , font_size=2_4 ) A_ : Optional[int] = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) gpu.move_to([-1, -1, 0] ) self.add(lowercase ) A_ : Optional[Any] = [mem.copy() for i in range(6 )] A_ : Optional[int] = VGroup(*lowercase ).arrange(lowercase , buff=0 ) A_ : int = Text('Model' , font_size=2_4 ) A_ : Any = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) model.move_to([3, -1.0, 0] ) self.add(lowercase ) A_ : Optional[int] = [] A_ : Union[str, Any] = [] A_ : Dict = [] for i, rect in enumerate(lowercase ): rect.set_stroke(lowercase ) A_ : Tuple = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=lowercase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowercase , buff=0.0 ) self.add(lowercase ) model_cpu_arr.append(lowercase ) self.add(*lowercase , *lowercase , *lowercase ) A_ : Union[str, Any] = [mem.copy() for i in range(6 )] A_ : Tuple = VGroup(*lowercase ).arrange(lowercase , buff=0 ) A_ : int = Text('Loaded Checkpoint' , font_size=2_4 ) A_ : Union[str, Any] = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) checkpoint.move_to([3, 0.5, 0] ) self.add(lowercase ) A_ : Tuple = [] A_ : List[Any] = [] for i, rect in enumerate(lowercase ): A_ : Tuple = fill.copy().set_fill(lowercase , opacity=0.7 ) target.move_to(lowercase ) ckpt_arr.append(lowercase ) A_ : Tuple = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(lowercase ) self.add(*lowercase , *lowercase ) A_ : int = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) A_ : Tuple = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowercase , lowercase ) A_ : Any = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , ) blue_text.next_to(lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowercase ) A_ : Any = MarkupText( F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=2_4 , ) step_a.move_to([2, 2, 0] ) A_ : Union[str, Any] = [meta_mem.copy() for i in range(6 )] A_ : List[Any] = [meta_mem.copy() 
for i in range(6 )] A_ : Union[str, Any] = VGroup(*lowercase ).arrange(lowercase , buff=0 ) A_ : str = VGroup(*lowercase ).arrange(lowercase , buff=0 ) A_ : Any = VGroup(lowercase , lowercase ).arrange(lowercase , buff=0 ) A_ : int = Text('Disk' , font_size=2_4 ) A_ : Dict = Group(lowercase , lowercase ).arrange(lowercase , buff=0.5 , aligned_edge=lowercase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(lowercase , run_time=3 ) , Write(lowercase , run_time=1 ) , Create(lowercase , run_time=1 ) ) A_ : Optional[int] = [] for i, rect in enumerate(lowercase ): A_ : str = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(lowercase , run_time=1.5 ) ) self.play(*lowercase ) self.play(FadeOut(lowercase ) ) A_ : Any = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=2_4 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowercase , run_time=3 ) ) self.play( FadeOut(lowercase , lowercase , *lowercase , *lowercase ) , ) self.wait()
710
def UpperCamelCase(word: str, max_width: int) -> list:
    '''simple docstring'''
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(' '.join(line) + (remaining_spaces + 1) * ' ')
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
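# Illustrative doctest-style example (added, not part of the original file):
#     UpperCamelCase("This is an example of text justification.", 16)
# returns three fully justified 16-character lines:
#     ['This    is    an', 'example  of text', 'justification.  ']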
70
0
import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    '''simple docstring'''

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
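# Illustrative usage (added example, not part of the original module):
#     graph = [Vertex(i) for i in range(3)]   # vertices "0", "1", "2"
#     connect(graph, 1, 2, 1)                 # edge 1-2 with weight 1
#     connect(graph, 2, 3, 2)                 # edge 2-3 with weight 2
#     connect(graph, 1, 3, 3)                 # edge 1-3 with weight 3
#     prim(graph, graph[0])                   # -> [(2, 1), (3, 2)]
# Both MST edges avoid the heavier 1-3 connection, as expected.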
711
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa _UpperCAmelCase = logging.getLogger(__name__) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''summarization''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ROUGE_KEYS lowerCamelCase_ = '''rouge2''' def __init__( self , lowercase , **lowercase ): """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: A_ : str = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) A_ : List[str] = Path(self.output_dir ) / 'metrics.json' A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) A_ : str = 0 A_ : Any = defaultdict(lowercase ) A_ : Union[str, Any] = self.config.model_type A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size A_ : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } A_ : Optional[Any] = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} A_ : Tuple = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) A_ : int = get_git_info()['repo_sha'] A_ : int = hparams.num_workers A_ : Union[str, Any] = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ): A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang] A_ : Any = self.decoder_start_token_id A_ : str = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset ) A_ : Union[str, Any] = False A_ : Tuple = self.model.config.num_beams 
if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: A_ : int = self.hparams.eval_max_gen_length else: A_ : List[Any] = self.model.config.max_length A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) A_ : int = True return readable_batch def lowerCAmelCase_ ( self , lowercase , **lowercase ): """simple docstring""" return self.model(lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[Any] = self.tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) return lmap(str.strip , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = self.tokenizer.pad_token_id A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask'] A_ : str = batch['labels'] if isinstance(self.model , lowercase ): A_ : Optional[int] = self.model._shift_right(lowercase ) else: A_ : Any = shift_tokens_right(lowercase , lowercase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero A_ : Optional[Any] = decoder_input_ids self.save_readable_batch(lowercase ) A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase ) A_ : Dict = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase ) assert lm_logits.shape[-1] == self.vocab_size A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 ) A_ , A_ : Any = label_smoothed_nll_loss( lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase ) return (loss,) @property def lowerCAmelCase_ ( self ): """simple docstring""" return self.tokenizer.pad_token_id def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : str = self._step(lowercase ) A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) ) # tokens per batch A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() A_ : str = batch['input_ids'].shape[0] A_ : Any = batch['input_ids'].eq(self.pad ).sum() A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase="val" ): """simple docstring""" self.step_count += 1 A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} A_ : Dict = losses['loss'] A_ : int = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len'] } A_ : Any = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase ) 
generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase ) A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} A_ : Tuple = self.step_count self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path A_ : Dict = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_rouge(lowercase , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') A_ : Optional[int] = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) A_ : int = (time.time() - ta) / batch['input_ids'].shape[0] A_ : List[str] = self.ids_to_clean_text(lowercase ) A_ : List[str] = self.ids_to_clean_text(batch['labels'] ) A_ : List[Any] = self._step(lowercase ) A_ : int = dict(zip(self.loss_names , lowercase ) ) A_ : Dict = self.calc_generative_metrics(lowercase , lowercase ) A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) ) base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase ) return base_metrics def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return self.validation_epoch_end(lowercase , prefix='test' ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = self.n_obs[type_path] A_ : List[Any] = self.target_lens[type_path] A_ : str = self.dataset_class( self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , ) return dataset def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ): """simple docstring""" A_ : Optional[int] = self.get_dataset(lowercase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": A_ : str = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase ) return dataloader def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def lowerCAmelCase_ ( lowercase , 
lowercase ): """simple docstring""" BaseTransformer.add_model_specific_args(lowercase , lowercase ) add_generic_args(lowercase , lowercase ) parser.add_argument( '--max_source_length' , default=1_0_2_4 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=5_6 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase ) parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase ) parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=lowercase , default='summarization' , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase ) parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase ) parser.add_argument( '--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=( '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' 
) , ) return parser class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''translation''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ['''bleu'''] lowerCamelCase_ = '''bleu''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , **lowercase ) A_ : List[Any] = hparams.src_lang A_ : str = hparams.tgt_lang def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_bleu(lowercase , lowercase ) def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=__lowercase ) check_output_dir(__lowercase ,expected_items=3 ) if model is None: if "summarization" in args.task: A_ : SummarizationModule = SummarizationModule(__lowercase ) else: A_ : SummarizationModule = TranslationModule(__lowercase ) A_ : Optional[int] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('/tmp' ) or str(args.output_dir ).startswith('/var' ) ): A_ : List[str] = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase ) A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience ) else: A_ : str = False A_ : Dict = args.val_metric == 'loss' A_ : pl.Trainer = generic_train( __lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback( args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,) pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' ) if not args.do_predict: return model A_ : Optional[Any] = '' A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) ) if checkpoints: A_ : List[Any] = checkpoints[-1] A_ : Any = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() _UpperCAmelCase = pl.Trainer.add_argparse_args(parser) _UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd()) _UpperCAmelCase = parser.parse_args() main(args)
70
0
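When label_smoothing > 0, the training step in the snippet above delegates to a label_smoothed_nll_loss helper. A compact sketch of that loss in the usual fairseq-style formulation (the exact signature of the project's utils helper is an assumption):

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids.
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)  # uniform prior over the vocab
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss

lprobs = torch.log_softmax(torch.randn(4, 10), dim=-1)
target = torch.tensor([1, 2, 3, -100])
loss, nll = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)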
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''mobilenet_v1''' def __init__( self , lowercase=3 , lowercase=2_2_4 , lowercase=1.0 , lowercase=8 , lowercase="relu6" , lowercase=True , lowercase=0.999 , lowercase=0.02 , lowercase=0.001 , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A_ : Any = num_channels A_ : Any = image_size A_ : str = depth_multiplier A_ : Dict = min_depth A_ : Union[str, Any] = hidden_act A_ : int = tf_padding A_ : Dict = classifier_dropout_prob A_ : Optional[int] = initializer_range A_ : str = layer_norm_eps class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = version.parse('''1.11''' ) @property def lowerCAmelCase_ ( self ): """simple docstring""" return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def lowerCAmelCase_ ( self ): """simple docstring""" if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def lowerCAmelCase_ ( self ): """simple docstring""" return 1E-4
712
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ): """simple docstring""" A_ : List[Any] = parent A_ : Optional[Any] = batch_size A_ : Dict = image_size A_ : str = num_channels A_ : Union[str, Any] = embeddings_size A_ : Optional[Any] = hidden_sizes A_ : Any = depths A_ : List[str] = is_training A_ : int = use_labels A_ : Optional[Any] = hidden_act A_ : List[Any] = num_labels A_ : Optional[int] = scope A_ : int = len(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) A_ : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Any = TFRegNetModel(config=lowercase ) A_ : Optional[Any] = model(lowercase , training=lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : int = self.num_labels A_ : Tuple = TFRegNetForImageClassification(lowercase ) A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : List[Any] = config_and_inputs A_ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase_ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = TFRegNetModelTester(self ) A_ : List[Any] = ConfigTester(self , 
config_class=lowercase , has_text_modality=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(lowercase ) A_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Optional[Any] = [*signature.parameters.keys()] A_ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" def check_hidden_states_output(lowercase , lowercase , lowercase ): A_ : List[Any] = model_class(lowercase ) A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase ) A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowercase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[Any] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : int = layer_type A_ : Tuple = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : Any = True check_hidden_states_output(lowercase , lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ): A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase ) A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple() def recursive_check(lowercase , lowercase ): if isinstance(lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ): recursive_check(lowercase , lowercase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowercase , lowercase ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(lowercase , lowercase ) for model_class in self.all_model_classes: A_ : Dict = model_class(lowercase ) A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase ) A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : Any = self._prepare_for_class(lowercase , lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def UpperCamelCase ( ): '''simple docstring''' A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A_ : int = self.default_image_processor A_ : List[str] = prepare_img() A_ : Any = image_processor(images=lowercase , return_tensors='tf' ) # forward pass A_ : Tuple = model(**lowercase , training=lowercase ) # verify the logits A_ : int = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase ) A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
70
0
import argparse import logging import pickle import random import time import numpy as np from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) _UpperCAmelCase = logging.getLogger(__name__) def UpperCamelCase ( ): '''simple docstring''' A_ : Optional[int] = argparse.ArgumentParser( description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' ) parser.add_argument('--file_path' ,type=__lowercase ,default='data/dump.txt' ,help='The path to the data.' ) parser.add_argument('--tokenizer_type' ,type=__lowercase ,default='bert' ,choices=['bert', 'roberta', 'gpt2'] ) parser.add_argument('--tokenizer_name' ,type=__lowercase ,default='bert-base-uncased' ,help='The tokenizer to use.' ) parser.add_argument('--dump_file' ,type=__lowercase ,default='data/dump' ,help='The dump file prefix.' ) A_ : List[str] = parser.parse_args() logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' ) if args.tokenizer_type == "bert": A_ : Optional[Any] = BertTokenizer.from_pretrained(args.tokenizer_name ) A_ : Union[str, Any] = tokenizer.special_tokens_map['cls_token'] # `[CLS]` A_ : Optional[int] = tokenizer.special_tokens_map['sep_token'] # `[SEP]` elif args.tokenizer_type == "roberta": A_ : Tuple = RobertaTokenizer.from_pretrained(args.tokenizer_name ) A_ : Optional[Any] = tokenizer.special_tokens_map['cls_token'] # `<s>` A_ : Dict = tokenizer.special_tokens_map['sep_token'] # `</s>` elif args.tokenizer_type == "gpt2": A_ : Union[str, Any] = GPTaTokenizer.from_pretrained(args.tokenizer_name ) A_ : Optional[int] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>` A_ : Optional[Any] = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>` logger.info(f'''Loading text from {args.file_path}''' ) with open(args.file_path ,'r' ,encoding='utf8' ) as fp: A_ : str = fp.readlines() logger.info('Start encoding' ) logger.info(f'''{len(__lowercase )} examples to process.''' ) A_ : Union[str, Any] = [] A_ : Optional[Any] = 0 A_ : Union[str, Any] = 1_00_00 A_ : Union[str, Any] = time.time() for text in data: A_ : Tuple = f'''{bos} {text.strip()} {sep}''' A_ : List[Any] = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase ) rslt.append(__lowercase ) iter += 1 if iter % interval == 0: A_ : str = time.time() logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' ) A_ : str = time.time() logger.info('Finished binarization' ) logger.info(f'''{len(__lowercase )} examples processed.''' ) A_ : int = f'''{args.dump_file}.{args.tokenizer_name}.pickle''' A_ : Tuple = tokenizer.vocab_size if vocab_size < (1 << 16): A_ : int = [np.uintaa(__lowercase ) for d in rslt] else: A_ : str = [np.intaa(__lowercase ) for d in rslt] random.shuffle(rslt_ ) logger.info(f'''Dump to {dp_file}''' ) with open(__lowercase ,'wb' ) as handle: pickle.dump(rslt_ ,__lowercase ,protocol=pickle.HIGHEST_PROTOCOL ) if __name__ == "__main__": main()
713
def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : Dict ): '''simple docstring''' A_ : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Dict ,__lowercase : Union[str, Any] ): '''simple docstring''' A_ : int = 0 while b > 0: if b & 1: A_ : Any = ((res % c) + (a % c)) % c a += a b >>= 1 return res
70
0
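The second helper in the snippet above computes (a * b) % c by add-and-double, so no full product is ever formed. A runnable sketch of the technique (variable names are assumptions, since the dump strips them):

def mod_mult(a, b, c):
    # Binary decomposition of b: add a doubled copy of a for every set bit,
    # reducing modulo c at each step to keep intermediates small.
    res = 0
    a %= c
    while b > 0:
        if b & 1:
            res = (res + a) % c
        a = (a + a) % c
        b >>= 1
    return res

assert mod_mult(123456789, 987654321, 10**9 + 7) == (123456789 * 987654321) % (10**9 + 7)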
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class UpperCAmelCase : '''simple docstring''' def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" raise NotImplementedError() def lowerCAmelCase_ ( self ): """simple docstring""" raise NotImplementedError() class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , lowercase = False , **lowercase ): """simple docstring""" A_ : List[Any] = tokenizer A_ : Optional[Any] = skip_prompt A_ : Optional[int] = decode_kwargs # variables used in the streaming process A_ : Union[str, Any] = [] A_ : str = 0 A_ : Tuple = True def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('TextStreamer only supports batch size 1' ) elif len(value.shape ) > 1: A_ : Union[str, Any] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: A_ : Dict = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) A_ : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('\n' ): A_ : Tuple = text[self.print_len :] A_ : List[Any] = [] A_ : int = 0 # If the last token is a CJK character, we print the characters. elif len(lowercase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): A_ : List[str] = text[self.print_len :] self.print_len += len(lowercase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) else: A_ : Optional[int] = text[self.print_len : text.rfind(' ' ) + 1] self.print_len += len(lowercase ) self.on_finalized_text(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" if len(self.token_cache ) > 0: A_ : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) A_ : Optional[Any] = text[self.print_len :] A_ : Dict = [] A_ : Optional[Any] = 0 else: A_ : List[str] = '' A_ : Tuple = True self.on_finalized_text(lowercase , stream_end=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" print(lowercase , flush=lowercase , end='' if not stream_end else None ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if ( (cp >= 0X4_e00 and cp <= 0X9_fff) or (cp >= 0X3_400 and cp <= 0X4_dbf) # or (cp >= 0X20_000 and cp <= 0X2a_6df) # or (cp >= 0X2a_700 and cp <= 0X2b_73f) # or (cp >= 0X2b_740 and cp <= 0X2b_81f) # or (cp >= 0X2b_820 and cp <= 0X2c_eaf) # or (cp >= 0Xf_900 and cp <= 0Xf_aff) or (cp >= 0X2f_800 and cp <= 0X2f_a1f) # ): # return True return False class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , lowercase = False , lowercase = None , **lowercase ): """simple docstring""" super().__init__(lowercase , lowercase , **lowercase ) A_ : Union[str, Any] = Queue() A_ : str = None A_ : Optional[int] = timeout def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" self.text_queue.put(lowercase , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self ): """simple docstring""" return self def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
714
def UpperCamelCase ( __lowercase : int ): '''simple docstring''' if length <= 0 or not isinstance(__lowercase ,int ): raise ValueError('Length must be a positive integer.' ) return [n * (2 * n - 1) for n in range(__lowercase )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
70
0
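The hexagonal-numbers snippet above relies on the closed form h(n) = n(2n - 1). A runnable form with the parameter name restored (an assumption) for a quick sanity check:

def hexagonal_numbers(length: int) -> list[int]:
    # nth hexagonal number: h(n) = n * (2n - 1), starting at n = 0.
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]

print(hexagonal_numbers(5))  # [0, 1, 6, 15, 28]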
import json import logging import os import socket import git import numpy as np import torch logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) _UpperCAmelCase = logging.getLogger(__name__) def UpperCamelCase ( __lowercase : str ): '''simple docstring''' A_ : str = git.Repo(search_parent_directories=__lowercase ) A_ : int = { 'repo_id': str(__lowercase ), 'repo_sha': str(repo.head.object.hexsha ), 'repo_branch': str(repo.active_branch ), } with open(os.path.join(__lowercase ,'git_log.json' ) ,'w' ) as f: json.dump(__lowercase ,__lowercase ,indent=4 ) def UpperCamelCase ( __lowercase : Any ): '''simple docstring''' if params.n_gpu <= 0: A_ : Any = 0 A_ : List[Any] = -1 A_ : int = True A_ : List[Any] = False return assert torch.cuda.is_available() logger.info('Initializing GPUs' ) if params.n_gpu > 1: assert params.local_rank != -1 A_ : Optional[int] = int(os.environ['WORLD_SIZE'] ) A_ : Optional[Any] = int(os.environ['N_GPU_NODE'] ) A_ : Optional[int] = int(os.environ['RANK'] ) # number of nodes / node ID A_ : Dict = params.world_size // params.n_gpu_per_node A_ : str = params.global_rank // params.n_gpu_per_node A_ : Union[str, Any] = True assert params.n_nodes == int(os.environ['N_NODES'] ) assert params.node_id == int(os.environ['NODE_RANK'] ) # local job (single GPU) else: assert params.local_rank == -1 A_ : int = 1 A_ : Optional[Any] = 0 A_ : Any = 0 A_ : List[Any] = 0 A_ : int = 1 A_ : Optional[Any] = 1 A_ : Optional[Any] = False # sanity checks assert params.n_nodes >= 1 assert 0 <= params.node_id < params.n_nodes assert 0 <= params.local_rank <= params.global_rank < params.world_size assert params.world_size == params.n_nodes * params.n_gpu_per_node # define whether this is the master process / if we are in multi-node distributed mode A_ : Optional[Any] = params.node_id == 0 and params.local_rank == 0 A_ : int = params.n_nodes > 1 # summary A_ : Tuple = f'''--- Global rank: {params.global_rank} - ''' logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes ) logger.info(PREFIX + 'Node ID : %i' % params.node_id ) logger.info(PREFIX + 'Local rank : %i' % params.local_rank ) logger.info(PREFIX + 'World size : %i' % params.world_size ) logger.info(PREFIX + 'GPUs per node : %i' % params.n_gpu_per_node ) logger.info(PREFIX + 'Master : %s' % str(params.is_master ) ) logger.info(PREFIX + 'Multi-node : %s' % str(params.multi_node ) ) logger.info(PREFIX + 'Multi-GPU : %s' % str(params.multi_gpu ) ) logger.info(PREFIX + 'Hostname : %s' % socket.gethostname() ) # set GPU device torch.cuda.set_device(params.local_rank ) # initialize multi-GPU if params.multi_gpu: logger.info('Initializing PyTorch distributed' ) torch.distributed.init_process_group( init_method='env://' ,backend='nccl' ,) def UpperCamelCase ( __lowercase : List[Any] ): '''simple docstring''' np.random.seed(args.seed ) torch.manual_seed(args.seed ) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed )
715
from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def UpperCamelCase ( ): '''simple docstring''' A_ , A_ : Any = 9, 14 # noqa: F841 A_ : str = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] A_ : List[Any] = defaultdict(__lowercase ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) A_ : Tuple = mst(__lowercase ) A_ : Tuple = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: A_ : List[Any] = tuple(answer[:2] ) A_ : Union[str, Any] = tuple(edge[::-1] ) assert edge in result or reverse in result
70
0
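The MST check above asserts that a Prim's-algorithm tree contains the expected edges. For reference, a compact heapq-based Prim over a plain adjacency dict (this representation is an assumption; the snippets in this dump use a Vertex class instead):

import heapq

def prim_mst(adj, start):
    # adj: {node: [(weight, neighbor), ...]}; returns the MST as (u, v, w) edges.
    visited = {start}
    frontier = [(w, start, v) for w, v in adj[start]]
    heapq.heapify(frontier)
    mst = []
    while frontier and len(visited) < len(adj):
        w, u, v = heapq.heappop(frontier)
        if v in visited:
            continue  # stale entry: v was already reached via a cheaper edge
        visited.add(v)
        mst.append((u, v, w))
        for nw, nv in adj[v]:
            if nv not in visited:
                heapq.heappush(frontier, (nw, v, nv))
    return mst

adj = {1: [(4, 2), (3, 3)], 2: [(4, 1), (2, 3)], 3: [(3, 1), (2, 2)]}
print(prim_mst(adj, 1))  # [(1, 3, 3), (3, 2, 2)]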
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = CustomTokenizer pass
716
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def UpperCamelCase ( ): '''simple docstring''' A_ : List[Any] = ArgumentParser('Accelerate CLI tool' ,usage='accelerate <command> [<args>]' ,allow_abbrev=__lowercase ) A_ : Any = parser.add_subparsers(help='accelerate command helpers' ) # Register commands get_config_parser(subparsers=__lowercase ) env_command_parser(subparsers=__lowercase ) launch_command_parser(subparsers=__lowercase ) tpu_command_parser(subparsers=__lowercase ) test_command_parser(subparsers=__lowercase ) # Let's go A_ : Optional[Any] = parser.parse_args() if not hasattr(__lowercase ,'func' ): parser.print_help() exit(1 ) # Run args.func(__lowercase ) if __name__ == "__main__": main()
70
0
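The accelerate-style CLI above follows the standard argparse subcommand-dispatch pattern: each registrar adds a subparser that sets a func default, and main() calls whichever one parsing selected. A generic runnable sketch (the "greet" command is a placeholder, not part of any real CLI):

from argparse import ArgumentParser

def main():
    parser = ArgumentParser("tool", usage="tool <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="command helpers")

    # Each command registers its own arguments plus a `func` default.
    greet = subparsers.add_parser("greet")
    greet.add_argument("name")
    greet.set_defaults(func=lambda args: print(f"hello {args.name}"))

    args = parser.parse_args()
    if not hasattr(args, "func"):  # no subcommand given
        parser.print_help()
        raise SystemExit(1)
    args.func(args)

if __name__ == "__main__":
    main()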
import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEImageProcessor, ) def UpperCamelCase ( __lowercase : List[str] ): '''simple docstring''' A_ : Any = VideoMAEConfig() set_architecture_configs(__lowercase ,__lowercase ) if "finetuned" not in model_name: A_ : int = False if "finetuned" in model_name: A_ : Any = 'huggingface/label-files' if "kinetics" in model_name: A_ : Dict = 4_00 A_ : Union[str, Any] = 'kinetics400-id2label.json' elif "ssv2" in model_name: A_ : Dict = 1_74 A_ : List[Any] = 'something-something-v2-id2label.json' else: raise ValueError('Model name should either contain \'kinetics\' or \'ssv2\' in case it\'s fine-tuned.' ) A_ : Optional[int] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) ) A_ : Union[str, Any] = {int(__lowercase ): v for k, v in idalabel.items()} A_ : Any = idalabel A_ : Dict = {v: k for k, v in idalabel.items()} return config def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Union[str, Any] ): '''simple docstring''' if "small" in model_name: A_ : Optional[Any] = 3_84 A_ : Union[str, Any] = 15_36 A_ : List[Any] = 12 A_ : Union[str, Any] = 16 A_ : Optional[int] = 12 A_ : Tuple = 3 A_ : str = 1_92 A_ : Union[str, Any] = 7_68 elif "large" in model_name: A_ : Any = 10_24 A_ : int = 40_96 A_ : int = 24 A_ : Optional[Any] = 16 A_ : int = 12 A_ : Optional[int] = 8 A_ : Union[str, Any] = 5_12 A_ : List[Any] = 20_48 elif "huge" in model_name: A_ : Union[str, Any] = 12_80 A_ : Union[str, Any] = 51_20 A_ : List[Any] = 32 A_ : Tuple = 16 A_ : List[str] = 12 A_ : List[str] = 8 A_ : List[Any] = 6_40 A_ : int = 25_60 elif "base" not in model_name: raise ValueError('Model name should include either "small", "base", "large", or "huge"' ) def UpperCamelCase ( __lowercase : Any ): '''simple docstring''' if "encoder." in name: A_ : int = name.replace('encoder.' 
,'' ) if "cls_token" in name: A_ : List[Any] = name.replace('cls_token' ,'videomae.embeddings.cls_token' ) if "decoder_pos_embed" in name: A_ : List[str] = name.replace('decoder_pos_embed' ,'decoder.decoder_pos_embed' ) if "pos_embed" in name and "decoder" not in name: A_ : Tuple = name.replace('pos_embed' ,'videomae.embeddings.position_embeddings' ) if "patch_embed.proj" in name: A_ : Optional[Any] = name.replace('patch_embed.proj' ,'videomae.embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: A_ : int = name.replace('patch_embed.norm' ,'videomae.embeddings.norm' ) if "decoder.blocks" in name: A_ : Union[str, Any] = name.replace('decoder.blocks' ,'decoder.decoder_layers' ) if "blocks" in name: A_ : Dict = name.replace('blocks' ,'videomae.encoder.layer' ) if "attn.proj" in name: A_ : str = name.replace('attn.proj' ,'attention.output.dense' ) if "attn" in name and "bias" not in name: A_ : Optional[int] = name.replace('attn' ,'attention.self' ) if "attn" in name: A_ : str = name.replace('attn' ,'attention.attention' ) if "norm1" in name: A_ : int = name.replace('norm1' ,'layernorm_before' ) if "norm2" in name: A_ : Tuple = name.replace('norm2' ,'layernorm_after' ) if "mlp.fc1" in name: A_ : int = name.replace('mlp.fc1' ,'intermediate.dense' ) if "mlp.fc2" in name: A_ : Any = name.replace('mlp.fc2' ,'output.dense' ) if "decoder_embed" in name: A_ : Union[str, Any] = name.replace('decoder_embed' ,'decoder.decoder_embed' ) if "decoder_norm" in name: A_ : str = name.replace('decoder_norm' ,'decoder.decoder_norm' ) if "decoder_pred" in name: A_ : str = name.replace('decoder_pred' ,'decoder.decoder_pred' ) if "norm.weight" in name and "decoder" not in name and "fc" not in name: A_ : Dict = name.replace('norm.weight' ,'videomae.layernorm.weight' ) if "norm.bias" in name and "decoder" not in name and "fc" not in name: A_ : int = name.replace('norm.bias' ,'videomae.layernorm.bias' ) if "head" in name and "decoder" not in name: A_ : Dict = name.replace('head' ,'classifier' ) return name def UpperCamelCase ( __lowercase : int ,__lowercase : Any ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A_ : Dict = orig_state_dict.pop(__lowercase ) if key.startswith('encoder.' ): A_ : Optional[int] = key.replace('encoder.' ,'' ) if "qkv" in key: A_ : Any = key.split('.' ) if key.startswith('decoder.blocks' ): A_ : Optional[Any] = config.decoder_hidden_size A_ : Optional[int] = int(key_split[2] ) A_ : List[Any] = 'decoder.decoder_layers.' if "weight" in key: A_ : Optional[Any] = val[:dim, :] A_ : Tuple = val[dim : dim * 2, :] A_ : Union[str, Any] = val[-dim:, :] else: A_ : str = config.hidden_size A_ : Optional[Any] = int(key_split[1] ) A_ : Optional[int] = 'videomae.encoder.layer.' 
if "weight" in key: A_ : Tuple = val[:dim, :] A_ : Optional[int] = val[dim : dim * 2, :] A_ : Dict = val[-dim:, :] else: A_ : Optional[int] = val return orig_state_dict def UpperCamelCase ( ): '''simple docstring''' A_ : Union[str, Any] = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' ,filename='eating_spaghetti.npy' ,repo_type='dataset' ) A_ : Optional[int] = np.load(__lowercase ) return list(__lowercase ) def UpperCamelCase ( __lowercase : Dict ,__lowercase : Any ,__lowercase : Union[str, Any] ,__lowercase : Any ): '''simple docstring''' A_ : Dict = get_videomae_config(__lowercase ) if "finetuned" in model_name: A_ : List[str] = VideoMAEForVideoClassification(__lowercase ) else: A_ : Tuple = VideoMAEForPreTraining(__lowercase ) # download original checkpoint, hosted on Google Drive A_ : Optional[int] = 'pytorch_model.bin' gdown.cached_download(__lowercase ,__lowercase ,quiet=__lowercase ) A_ : Union[str, Any] = torch.load(__lowercase ,map_location='cpu' ) if "model" in files: A_ : int = files['model'] else: A_ : Union[str, Any] = files['module'] A_ : Tuple = convert_state_dict(__lowercase ,__lowercase ) model.load_state_dict(__lowercase ) model.eval() # verify model on basic input A_ : int = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ) A_ : Tuple = prepare_video() A_ : Union[str, Any] = image_processor(__lowercase ,return_tensors='pt' ) if "finetuned" not in model_name: A_ : List[Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' ,filename='bool_masked_pos.pt' ) A_ : Optional[Any] = torch.load(__lowercase ) A_ : str = model(**__lowercase ) A_ : Tuple = outputs.logits A_ : Dict = [ 'videomae-small-finetuned-kinetics', 'videomae-small-finetuned-ssv2', # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) 'videomae-base-short', 'videomae-base-short-finetuned-kinetics', 'videomae-base', 'videomae-base-finetuned-kinetics', 'videomae-large', 'videomae-large-finetuned-kinetics', 'videomae-huge-finetuned-kinetics', # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) 'videomae-base-short-ssv2', 'videomae-base-short-finetuned-ssv2', 'videomae-base-ssv2', 'videomae-base-finetuned-ssv2', ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-small-finetuned-kinetics": A_ : Union[str, Any] = torch.Size([1, 4_00] ) A_ : Dict = torch.tensor([-0.92_91, -0.40_61, -0.93_07] ) elif model_name == "videomae-small-finetuned-ssv2": A_ : Optional[int] = torch.Size([1, 1_74] ) A_ : List[str] = torch.tensor([0.26_71, -0.46_89, -0.82_35] ) elif model_name == "videomae-base": A_ : Any = torch.Size([1, 14_08, 15_36] ) A_ : List[str] = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] ) elif model_name == "videomae-base-short": A_ : Optional[int] = torch.Size([1, 14_08, 15_36] ) A_ : Union[str, Any] = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] ) # we verified the loss both for normalized and unnormalized targets for this one A_ : Dict = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] ) elif model_name == "videomae-large": A_ : Tuple = torch.Size([1, 14_08, 15_36] ) A_ : Optional[int] = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] ) elif model_name == "videomae-large-finetuned-kinetics": A_ : Tuple = torch.Size([1, 
4_00] ) A_ : Union[str, Any] = torch.tensor([0.07_71, 0.00_11, -0.36_25] ) elif model_name == "videomae-huge-finetuned-kinetics": A_ : Dict = torch.Size([1, 4_00] ) A_ : List[Any] = torch.tensor([0.24_33, 0.16_32, -0.48_94] ) elif model_name == "videomae-base-short-finetuned-kinetics": A_ : Any = torch.Size([1, 4_00] ) A_ : Optional[Any] = torch.tensor([0.65_88, 0.09_90, -0.24_93] ) elif model_name == "videomae-base-finetuned-kinetics": A_ : Any = torch.Size([1, 4_00] ) A_ : Any = torch.tensor([0.36_69, -0.06_88, -0.24_21] ) elif model_name == "videomae-base-short-ssv2": A_ : str = torch.Size([1, 14_08, 15_36] ) A_ : Tuple = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] ) elif model_name == "videomae-base-short-finetuned-ssv2": A_ : Dict = torch.Size([1, 1_74] ) A_ : List[str] = torch.tensor([-0.05_37, -0.15_39, -0.32_66] ) elif model_name == "videomae-base-ssv2": A_ : Dict = torch.Size([1, 14_08, 15_36] ) A_ : Optional[int] = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] ) elif model_name == "videomae-base-finetuned-ssv2": A_ : Any = torch.Size([1, 1_74] ) A_ : Tuple = torch.tensor([0.19_61, -0.83_37, -0.63_89] ) else: raise ValueError(f'''Model name not supported. Should be one of {model_names}''' ) # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3] ,__lowercase ,atol=1e-4 ) else: print('Logits:' ,logits[0, :3, :3] ) assert torch.allclose(logits[0, :3, :3] ,__lowercase ,atol=1e-4 ) print('Logits ok!' ) # verify loss, if applicable if model_name == "videomae-base-short": A_ : Tuple = outputs.loss assert torch.allclose(__lowercase ,__lowercase ,atol=1e-4 ) print('Loss ok!' ) if pytorch_dump_folder_path is not None: print(f'''Saving model and image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__lowercase ) model.save_pretrained(__lowercase ) if push_to_hub: print('Pushing to the hub...' ) model.push_to_hub(__lowercase ,organization='nielsr' ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4""", type=str, help=( """URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct""" """ download link.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default="""/Users/nielsrogge/Documents/VideoMAE/Test""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--model_name""", default="""videomae-base""", type=str, help="""Name of the model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) _UpperCAmelCase = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
717
from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = DistilBertTokenizer lowerCamelCase_ = DistilBertTokenizerFast lowerCamelCase_ = True @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' ) A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase ) A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase ) A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase ) A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
70
0
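The VideoMAE conversion above splits each fused qkv projection into query/key/value thirds along the output dimension. A minimal sketch of that slicing step (shapes are assumptions; the real script also routes the pieces to renamed state-dict keys):

import torch

def split_qkv(qkv_weight, dim):
    # A fused projection stacks q, k and v as a (3 * dim, dim) matrix;
    # slicing along dim 0 recovers the three separate weight matrices.
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    return q, k, v

qkv = torch.randn(3 * 8, 8)
q, k, v = split_qkv(qkv, 8)
assert torch.equal(torch.cat([q, k, v]), qkv)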
'''simple docstring''' def UpperCamelCase ( __lowercase : int ): '''simple docstring''' A_ : List[str] = round(n ** (1 / 3)) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
718
import random def UpperCamelCase ( __lowercase : int ): '''simple docstring''' A_ : Tuple = num - 1 A_ : Optional[Any] = 0 while s % 2 == 0: A_ : Optional[int] = s // 2 t += 1 for _ in range(5 ): A_ : Optional[int] = random.randrange(2 ,num - 1 ) A_ : Any = pow(__lowercase ,__lowercase ,__lowercase ) if v != 1: A_ : List[str] = 0 while v != (num - 1): if i == t - 1: return False else: A_ : Union[str, Any] = i + 1 A_ : Tuple = (v**2) % num return True def UpperCamelCase ( __lowercase : int ): '''simple docstring''' if num < 2: return False A_ : Optional[Any] = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 1_01, 1_03, 1_07, 1_09, 1_13, 1_27, 1_31, 1_37, 1_39, 1_49, 1_51, 1_57, 1_63, 1_67, 1_73, 1_79, 1_81, 1_91, 1_93, 1_97, 1_99, 2_11, 2_23, 2_27, 2_29, 2_33, 2_39, 2_41, 2_51, 2_57, 2_63, 2_69, 2_71, 2_77, 2_81, 2_83, 2_93, 3_07, 3_11, 3_13, 3_17, 3_31, 3_37, 3_47, 3_49, 3_53, 3_59, 3_67, 3_73, 3_79, 3_83, 3_89, 3_97, 4_01, 4_09, 4_19, 4_21, 4_31, 4_33, 4_39, 4_43, 4_49, 4_57, 4_61, 4_63, 4_67, 4_79, 4_87, 4_91, 4_99, 5_03, 5_09, 5_21, 5_23, 5_41, 5_47, 5_57, 5_63, 5_69, 5_71, 5_77, 5_87, 5_93, 5_99, 6_01, 6_07, 6_13, 6_17, 6_19, 6_31, 6_41, 6_43, 6_47, 6_53, 6_59, 6_61, 6_73, 6_77, 6_83, 6_91, 7_01, 7_09, 7_19, 7_27, 7_33, 7_39, 7_43, 7_51, 7_57, 7_61, 7_69, 7_73, 7_87, 7_97, 8_09, 8_11, 8_21, 8_23, 8_27, 8_29, 8_39, 8_53, 8_57, 8_59, 8_63, 8_77, 8_81, 8_83, 8_87, 9_07, 9_11, 9_19, 9_29, 9_37, 9_41, 9_47, 9_53, 9_67, 9_71, 9_77, 9_83, 9_91, 9_97, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(__lowercase ) def UpperCamelCase ( __lowercase : int = 10_24 ): '''simple docstring''' while True: A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) ) if is_prime_low_num(__lowercase ): return num if __name__ == "__main__": _UpperCAmelCase = generate_large_prime() print(("""Prime number:""", num)) print(("""is_prime_low_num:""", is_prime_low_num(num)))
70
0
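The primality snippet above is probabilistic Miller-Rabin: write num - 1 = 2**t * s with s odd, then probe random witnesses. A runnable sketch with names restored by assumption (5 rounds, matching the snippet):

import random

def rabin_miller(num, rounds=5):
    # Factor num - 1 as 2**t * s (s odd), then test random witnesses.
    if num < 2:
        return False
    if num in (2, 3):
        return True
    if num % 2 == 0:
        return False
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v in (1, num - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % num
            if v == num - 1:
                break
        else:
            return False  # a is a witness that num is composite
    return True

assert rabin_miller(101) and not rabin_miller(100)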
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" debug_launcher(test_script.main ) def lowerCAmelCase_ ( self ): """simple docstring""" debug_launcher(test_ops.main )
719
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCAmelCase = { """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""], """tokenization_m2m_100""": ["""M2M100Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase = [ """M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""", """M2M100ForConditionalGeneration""", """M2M100Model""", """M2M100PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
70
0
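The M2M100 __init__ above uses the lazy-module pattern: exported names resolve to their submodules only on first attribute access. A simplified stand-in for transformers' _LazyModule, as an illustration only (the real class also handles module specs and extra objects):

import importlib
import types

class LazyModule(types.ModuleType):
    # Maps each exported name to the submodule defining it; imports on demand.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._name_to_module)

    def __getattr__(self, name):
        if name not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {name}")
        submodule = importlib.import_module(f".{self._name_to_module[name]}", self.__name__)
        value = getattr(submodule, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value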
# limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
720
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = FlaxAutoencoderKL @property def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = 4 A_ : int = 3 A_ : List[str] = (3_2, 3_2) A_ : Any = jax.random.PRNGKey(0 ) A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } A_ : int = self.dummy_input return init_dict, inputs_dict
70
0
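# Editor's note: `deprecate(...)` in the row above emits a removal warning when
# the old import path is used. A minimal stand-in sketch of that behaviour (not
# the actual diffusers helper, whose signature is richer):
import warnings

def deprecate_sketch(name: str, remove_in: str, message: str, stacklevel: int = 2) -> None:
    warnings.warn(f"{message} (`{name}` is scheduled for removal in {remove_in}.)",
                  FutureWarning, stacklevel=stacklevel)

deprecate_sketch("pipelines_utils", "0.22.0",
                 "Please import from diffusers.pipelines.pipeline_utils instead.")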
def UpperCamelCase ( __lowercase : str ): '''simple docstring''' return "".join(chr(ord(__lowercase ) - 32 ) if 'a' <= char <= 'z' else char for char in word ) if __name__ == "__main__": from doctest import testmod testmod()
721
import numpy as np _UpperCAmelCase = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class UpperCAmelCase : '''simple docstring''' def __init__( self ): """simple docstring""" A_ : Any = np.array(lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE ) A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] ) return indexes def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : int = self.SQUARE[indexa - 1, indexa - 1] return letter def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = message.lower() A_ : Tuple = message.replace(' ' , '' ) A_ : int = message.replace('j' , 'i' ) A_ : Any = np.empty((2, len(lowercase )) ) for letter_index in range(len(lowercase ) ): A_ : Optional[int] = self.letter_to_numbers(message[letter_index] ) A_ : Union[str, Any] = numbers[0] A_ : Union[str, Any] = numbers[1] A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) ) A_ : int = '' for numbers_index in range(len(lowercase ) ): A_ : str = int(second_step[numbers_index * 2] ) A_ : str = int(second_step[(numbers_index * 2) + 1] ) A_ : Tuple = self.numbers_to_letter(lowercase , lowercase ) A_ : Tuple = encoded_message + letter return encoded_message def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Optional[int] = message.lower() message.replace(' ' , '' ) A_ : Tuple = np.empty(2 * len(lowercase ) ) for letter_index in range(len(lowercase ) ): A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] ) A_ : Optional[int] = numbers[0] A_ : Dict = numbers[1] A_ : Optional[int] = first_step.reshape((2, len(lowercase )) ) A_ : List[str] = '' for numbers_index in range(len(lowercase ) ): A_ : List[Any] = int(second_step[0, numbers_index] ) A_ : Optional[int] = int(second_step[1, numbers_index] ) A_ : Tuple = self.numbers_to_letter(lowercase , lowercase ) A_ : str = decoded_message + letter return decoded_message
70
0
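# Editor's note: the numpy class in this row is a Bifid-style Polybius-square
# cipher: each letter's (row, col) coordinates are split into all-rows-then-
# all-columns and re-paired. A compact runnable sketch of the same transform
# with ordinary names (my reconstruction of the obfuscated bindings):
import numpy as np

POLYBIUS = np.array([
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
])

def to_coords(letter):
    row, col = np.where(POLYBIUS == letter)
    return int(row[0]) + 1, int(col[0]) + 1

def bifid_encode(message):
    message = message.lower().replace(" ", "").replace("j", "i")
    coords = np.array([to_coords(ch) for ch in message]).T   # 2 x n: rows, cols
    pairs = coords.reshape(2 * len(message)).reshape(-1, 2)  # rows-then-cols, re-paired
    return "".join(POLYBIUS[r - 1, c - 1] for r, c in pairs)

def bifid_decode(cipher):
    mixed = np.array([to_coords(ch) for ch in cipher]).reshape(2 * len(cipher))
    rows, cols = mixed.reshape(2, len(cipher))
    return "".join(POLYBIUS[r - 1, c - 1] for r, c in zip(rows, cols))

assert bifid_decode(bifid_encode("secret message")) == "secretmessage"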
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger() def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ): '''simple docstring''' print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase ) else: A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase ) if hidden_sizes == 1_92: A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase ) if hidden_sizes == 2_56: A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase ) if hidden_sizes == 3_84: A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase ) from_model.eval() A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval() A_ : Union[str, Any] = OrderedDict() A_ : Dict = from_model.state_dict() A_ : Tuple = list(from_model.state_dict().keys() ) A_ : str = list(our_model.state_dict().keys() ) print(len(__lowercase ) ,len(__lowercase ) ) for i in range(len(__lowercase ) ): A_ : str = weights[og_keys[i]] our_model.load_state_dict(__lowercase ) A_ : str = torch.randn((2, 3, 2_24, 2_24) ) A_ : str = from_model(__lowercase ) A_ : Optional[Any] = our_model(__lowercase ).logits assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one." A_ : List[str] = name print(__lowercase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) A_ : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ): '''simple docstring''' A_ : Dict = 'imagenet-1k-id2label.json' A_ : Optional[int] = 10_00 A_ : Optional[int] = (1, num_labels) A_ : int = 'huggingface/label-files' A_ : int = num_labels A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) ) A_ : int = {int(__lowercase ): v for k, v in idalabel.items()} A_ : List[str] = idalabel A_ : str = {v: k for k, v in idalabel.items()} A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase ) A_ : Any = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } A_ : Tuple = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] 
,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0.1 ,), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert; it must be one of the supported Levit* architectures.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
700
from math import sqrt def UpperCamelCase ( __lowercase : int = 1_00_00_00 ): '''simple docstring''' A_ : int = 0 A_ : int = 0 A_ : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowercase ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
70
0
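# Editor's note: the cuboid snippet in this row is Project Euler 86. For a
# cuboid a <= b <= c the shortest surface path is sqrt((a + b)**2 + c**2), and
# for a fixed longest side c and s = a + b there are
# min(c, s // 2) - max(1, s - c) + 1 valid (a, b) pairs. A sketch of the
# cumulative count, checked against the figure quoted in the problem statement
# (2060 integer-path cuboids with longest side up to 100):
from math import isqrt

def integer_path_cuboids_upto(m: int) -> int:
    total = 0
    for c in range(1, m + 1):
        for s in range(2, 2 * c + 1):  # s = a + b
            d = s * s + c * c
            if isqrt(d) ** 2 == d:     # shortest surface path is an integer
                total += min(c, s // 2) - max(1, s - c) + 1
    return total

assert integer_path_cuboids_upto(100) == 2060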
import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def UpperCamelCase ( __lowercase ): '''simple docstring''' A_ : Dict = args.pruning_method A_ : Optional[int] = args.threshold A_ : Tuple = args.model_name_or_path.rstrip('/' ) A_ : Union[str, Any] = args.target_model_path print(f'''Load fine-pruned model from {model_name_or_path}''' ) A_ : Any = torch.load(os.path.join(__lowercase ,'pytorch_model.bin' ) ) A_ : Optional[Any] = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: A_ : str = tensor print(f'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: A_ : Union[str, Any] = tensor print(f'''Copied layer {name}''' ) elif "bias" in name: A_ : str = tensor print(f'''Copied layer {name}''' ) else: if pruning_method == "magnitude": A_ : Optional[int] = MagnitudeBinarizer.apply(inputs=__lowercase ,threshold=__lowercase ) A_ : Tuple = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue A_ : int = name[:-6] A_ : int = model[f'''{prefix_}mask_scores'''] A_ : List[Any] = TopKBinarizer.apply(__lowercase ,__lowercase ) A_ : List[str] = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue A_ : Optional[int] = name[:-6] A_ : Optional[Any] = model[f'''{prefix_}mask_scores'''] A_ : Optional[Any] = ThresholdBinarizer.apply(__lowercase ,__lowercase ,__lowercase ) A_ : Any = tensor * mask print(f'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue A_ : Dict = name[:-6] A_ : str = model[f'''{prefix_}mask_scores'''] A_ : int = -0.1, 1.1 A_ : Union[str, Any] = torch.sigmoid(__lowercase ) A_ : List[Any] = s * (r - l) + l A_ : Tuple = s_bar.clamp(min=0.0 ,max=1.0 ) A_ : Optional[int] = tensor * mask print(f'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: A_ : int = os.path.join( os.path.dirname(__lowercase ) ,f'''bertarized_{os.path.basename(__lowercase )}''' ) if not os.path.isdir(__lowercase ): shutil.copytree(__lowercase ,__lowercase ) print(f'''\nCreated folder {target_model_path}''' ) torch.save(__lowercase ,os.path.join(__lowercase ,'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--pruning_method""", choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""], type=str, required=True, help=( """Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,""" """ sigmoied_threshold = Soft movement pruning)""" ), ) parser.add_argument( """--threshold""", type=float, required=False, help=( """For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.""" """For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.""" """Not needed for `l0`""" ), ) parser.add_argument( """--model_name_or_path""", type=str, required=True, help="""Folder containing the model that was previously fine-pruned""", ) parser.add_argument( """--target_model_path""", default=None, type=str, required=False, help="""Folder containing the model that was previously fine-pruned""", ) _UpperCAmelCase = parser.parse_args() main(args)
701
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ): """simple docstring""" super().__init__() A_ : Tuple = initial_learning_rate A_ : List[str] = warmup_steps A_ : int = power A_ : Dict = decay_schedule_fn A_ : Any = name def __call__( self , lowercase ): """simple docstring""" with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. A_ : Optional[int] = tf.cast(lowercase , tf.floataa ) A_ : int = tf.cast(self.warmup_steps , tf.floataa ) A_ : Optional[int] = global_step_float / warmup_steps_float A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,): '''simple docstring''' A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,) if num_warmup_steps: A_ : Tuple = WarmUp( initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,) if weight_decay_rate > 0.0: A_ : Union[str, Any] = AdamWeightDecay( learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,) else: A_ : Dict = tf.keras.optimizers.Adam( learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ): """simple docstring""" super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase ) A_ : Dict = weight_decay_rate A_ : Union[str, Any] = include_in_weight_decay A_ : str = exclude_from_weight_decay @classmethod def lowerCAmelCase_ ( cls , lowercase ): """simple docstring""" A_ : Tuple = {'WarmUp': WarmUp} return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase ) A_ : Optional[Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Dict = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ , A_ : Optional[int] = list(zip(*lowercase ) ) return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} A_ : List[str] = apply_state or {} A_ : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: A_ : Dict = self._fallback_apply_state(lowercase , lowercase ) A_ : int = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase , lowercase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase , lowercase ) is not None: return False return True class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self ): """simple docstring""" A_ : int = [] A_ : Optional[int] = None @property def lowerCAmelCase_ ( self ): """simple 
docstring""" if self._accum_steps is None: A_ : int = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , lowercase ): """simple docstring""" if not self._gradients: A_ : Optional[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' ) for accum_gradient, gradient in zip(self._gradients , lowercase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase ) self._accum_steps.assign_add(1 ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase ) )
70
0
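# Editor's note: the TF optimizer code in this row wraps a polynomial-decay
# schedule in a warmup phase. A pure-Python sketch of the learning-rate shape
# it produces (the concrete defaults below are hypothetical):
def warmup_then_poly_decay(step, init_lr=3e-5, warmup_steps=100,
                           total_steps=1_000, power=1.0, end_lr=0.0):
    if step < warmup_steps:
        # ramp up: init_lr * (step / warmup_steps) ** power (linear when power == 1)
        return init_lr * (step / warmup_steps) ** power
    # then decay from init_lr to end_lr over the remaining steps
    decay_steps = total_steps - warmup_steps
    progress = min(step - warmup_steps, decay_steps) / decay_steps
    return (init_lr - end_lr) * (1.0 - progress) ** power + end_lr

assert warmup_then_poly_decay(0) == 0.0
assert warmup_then_poly_decay(100) == 3e-5   # peak right at the end of warmup
assert warmup_then_poly_decay(1_000) == 0.0  # fully decayed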
from __future__ import annotations from typing import TypedDict class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = 4_2 lowerCamelCase_ = 4_2 def UpperCamelCase ( __lowercase : str ): '''simple docstring''' if not isinstance(__lowercase ,__lowercase ): raise TypeError('The parameter s type must be str.' ) return [s[i:] + s[:i] for i in range(len(__lowercase ) )] def UpperCamelCase ( __lowercase : str ): '''simple docstring''' if not isinstance(__lowercase ,__lowercase ): raise TypeError('The parameter s type must be str.' ) if not s: raise ValueError('The parameter s must not be empty.' ) A_ : Optional[Any] = all_rotations(__lowercase ) rotations.sort() # sort the list of rotations in alphabetical order # make a string composed of the last char of each rotation A_ : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__lowercase ), } return response def UpperCamelCase ( __lowercase : str ,__lowercase : int ): '''simple docstring''' if not isinstance(__lowercase ,__lowercase ): raise TypeError('The parameter bwt_string type must be str.' ) if not bwt_string: raise ValueError('The parameter bwt_string must not be empty.' ) try: A_ : str = int(__lowercase ) except ValueError: raise TypeError( 'The parameter idx_original_string type must be int or' ' castable to int.' ) if idx_original_string < 0: raise ValueError('The parameter idx_original_string must not be lower than 0.' ) if idx_original_string >= len(__lowercase ): raise ValueError( 'The parameter idx_original_string must be lower than' ' len(bwt_string).' ) A_ : Optional[Any] = [''] * len(__lowercase ) for _ in range(len(__lowercase ) ): for i in range(len(__lowercase ) ): A_ : Union[str, Any] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": _UpperCAmelCase = """Provide a string and I will generate its BWT transform: """ _UpperCAmelCase = input(entry_msg).strip() _UpperCAmelCase = bwt_transform(s) print( F"""Burrows Wheeler transform for string '{s}' results """ F"""in '{result['bwt_string']}'""" ) _UpperCAmelCase = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""]) print( F"""Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' """ F"""we get original string '{original_string}'""" )
702
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : Any = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Tuple = 
AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[str] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) 
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
70
0
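# Editor's note: a compact round-trip sketch of the Burrows-Wheeler transform
# at the start of this row (same rotation-sorting construction, with ordinary
# names and the classic iterative inversion):
def bwt(s):
    rotations = sorted(s[i:] + s[:i] for i in range(len(s)))
    return "".join(rot[-1] for rot in rotations), rotations.index(s)

def inverse_bwt(last_column, idx):
    table = [""] * len(last_column)
    for _ in range(len(last_column)):
        # prepend the BWT column and re-sort; after n passes the table holds all rotations
        table = sorted(last_column[i] + table[i] for i in range(len(last_column)))
    return table[idx]

encoded, idx = bwt("^BANANA")
assert encoded == "BNN^AAA" and inverse_bwt(encoded, idx) == "^BANANA"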
'''simple docstring''' import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = GPTSanJapaneseTokenizer lowerCamelCase_ = False lowerCamelCase_ = {'''do_clean_text''': False, '''add_prefix_space''': False} def lowerCAmelCase_ ( self ): """simple docstring""" super().setUp() # fmt: off A_ : int = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>'] # fmt: on A_ : Tuple = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀 A_ : int = {'unk_token': '<unk>'} A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) with open(self.emoji_file , 'w' ) as emoji_writer: emoji_writer.write(json.dumps(lowercase ) ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Tuple = 'こんにちは、世界。 \nこんばんは、㔺界。😀' A_ : List[str] = 'こんにちは、世界。 \nこんばんは、世界。😀' return input_text, output_text def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Any = self.get_input_output_texts(lowercase ) A_ : int = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A_ : Optional[Any] = tokenizer.decode(lowercase , clean_up_tokenization_spaces=lowercase ) return text, ids def lowerCAmelCase_ ( self ): """simple docstring""" pass # TODO add if relevant def lowerCAmelCase_ ( self ): """simple docstring""" pass # TODO add if relevant def lowerCAmelCase_ ( self ): """simple docstring""" pass # TODO add if relevant def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.get_tokenizer() # Testing tokenization A_ : List[str] = 'こんにちは、世界。 こんばんは、㔺界。' A_ : int = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。'] A_ : Optional[int] = tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing conversion to ids without special tokens A_ : List[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] A_ : List[str] = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing conversion to ids with special tokens A_ : Tuple = tokens + [tokenizer.unk_token] A_ : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 1_9] A_ : Optional[int] = tokenizer.convert_tokens_to_ids(lowercase ) self.assertListEqual(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.get_tokenizer() # Testing tokenization A_ : int = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。' A_ : List[Any] = 'こんにちは、、、、世界。こんばんは、、、、世界。' A_ : List[str] = tokenizer.encode(lowercase ) A_ : Optional[int] = tokenizer.decode(lowercase ) self.assertEqual(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = 
self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization A_ : Union[str, Any] = 'こんにちは、世界。' A_ : Any = 'こんばんは、㔺界。😀' A_ : List[str] = 'こんにちは、世界。こんばんは、世界。😀' A_ : Any = tokenizer.encode(prefix_text + input_text ) A_ : Union[str, Any] = tokenizer.encode('' , prefix_text=prefix_text + input_text ) A_ : List[Any] = tokenizer.encode(lowercase , prefix_text=lowercase ) A_ : List[Any] = tokenizer.decode(lowercase ) A_ : Union[str, Any] = tokenizer.decode(lowercase ) A_ : List[Any] = tokenizer.decode(lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) # Testing tokenization A_ : Optional[int] = 'こんにちは、世界。' A_ : List[str] = 'こんばんは、㔺界。😀' A_ : Optional[Any] = len(tokenizer.encode(lowercase ) ) - 2 A_ : Dict = len(tokenizer.encode(lowercase ) ) - 2 A_ : int = [1] + [0] * (len_prefix + len_text + 1) A_ : Any = [1] * (len_prefix + len_text + 1) + [0] A_ : Any = [1] + [1] * (len_prefix) + [0] * (len_text + 1) A_ : List[str] = tokenizer(prefix_text + input_text ).token_type_ids A_ : Optional[Any] = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids A_ : Dict = tokenizer(lowercase , prefix_text=lowercase ).token_type_ids self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) A_ : int = tokenizer.encode('あンいワ' ) A_ : Union[str, Any] = tokenizer.encode('' , prefix_text='あンいワ' ) A_ : List[Any] = tokenizer.encode('いワ' , prefix_text='あン' ) self.assertEqual(tokenizer.decode(lowercase ) , tokenizer.decode(lowercase ) ) self.assertEqual(tokenizer.decode(lowercase ) , tokenizer.decode(lowercase ) ) self.assertNotEqual(lowercase , lowercase ) self.assertNotEqual(lowercase , lowercase ) self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' ) A_ : Any = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']] A_ : Optional[int] = tokenizer(lowercase , padding=lowercase ) A_ : str = tokenizer.batch_encode_plus(lowercase , padding=lowercase ) # fmt: off A_ : str = [[3_5_9_9_3, 8_6_4_0, 2_5_9_4_8, 3_5_9_9_8, 3_0_6_4_7, 3_5_6_7_5, 3_5_9_9_9, 3_5_9_9_9], [3_5_9_9_3, 1_0_3_8_2, 9_8_6_8, 3_5_9_9_8, 3_0_6_4_6, 9_4_5_9, 3_0_6_4_6, 3_5_6_7_5]] A_ : int = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] A_ : Tuple = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , lowercase ) self.assertListEqual(x_token.token_type_ids , lowercase ) self.assertListEqual(x_token.attention_mask , lowercase ) self.assertListEqual(x_token_a.input_ids , lowercase ) self.assertListEqual(x_token_a.token_type_ids , lowercase ) self.assertListEqual(x_token_a.attention_mask , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" pass
703
def UpperCamelCase ( __lowercase : str ): '''simple docstring''' A_ : int = len(__lowercase ) A_ : List[Any] = sum(__lowercase ) A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 ,n + 1 ): A_ : Optional[Any] = True for i in range(1 ,s + 1 ): A_ : Tuple = False for i in range(1 ,n + 1 ): for j in range(1 ,s + 1 ): A_ : Dict = dp[i][j - 1] if arr[i - 1] <= j: A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) ,-1 ,-1 ): if dp[n][j] is True: A_ : List[Any] = s - 2 * j break return diff
70
0
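# Editor's note: the DP in this row computes the minimum two-way partition
# difference of a list. The same answer via a reachable-subset-sums set rather
# than the boolean table, plus a worked example:
def min_partition_diff(arr):
    total = sum(arr)
    reachable = {0}                          # subset sums constructible so far
    for x in arr:
        reachable |= {s + x for s in reachable}
    # the best half is the largest reachable sum not exceeding total // 2
    best_half = max(s for s in reachable if s <= total // 2)
    return total - 2 * best_half

assert min_partition_diff([1, 6, 11, 5]) == 1   # {1, 5, 6} = 12 vs {11} = 11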
import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig _UpperCAmelCase = logging.get_logger(__name__) # General docstring _UpperCAmelCase = """PoolFormerConfig""" # Base docstring _UpperCAmelCase = """sail/poolformer_s12""" _UpperCAmelCase = [1, 512, 7, 7] # Image classification docstring _UpperCAmelCase = """sail/poolformer_s12""" _UpperCAmelCase = """tabby, tabby cat""" _UpperCAmelCase = [ """sail/poolformer_s12""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def UpperCamelCase ( __lowercase : str ,__lowercase : float = 0.0 ,__lowercase : bool = False ): '''simple docstring''' if drop_prob == 0.0 or not training: return input A_ : List[Any] = 1 - drop_prob A_ : Union[str, Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets A_ : str = keep_prob + torch.rand(__lowercase ,dtype=input.dtype ,device=input.device ) random_tensor.floor_() # binarize A_ : List[Any] = input.div(__lowercase ) * random_tensor return output class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , lowercase = None ): """simple docstring""" super().__init__() A_ : List[Any] = drop_prob def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return drop_path(lowercase , self.drop_prob , self.training ) def lowerCAmelCase_ ( self ): """simple docstring""" return "p={}".format(self.drop_prob ) class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=None ): """simple docstring""" super().__init__() A_ : Any = patch_size if isinstance(lowercase , collections.abc.Iterable ) else (patch_size, patch_size) A_ : Any = stride if isinstance(lowercase , collections.abc.Iterable ) else (stride, stride) A_ : Dict = padding if isinstance(lowercase , collections.abc.Iterable ) else (padding, padding) A_ : int = nn.Convad(lowercase , lowercase , kernel_size=lowercase , stride=lowercase , padding=lowercase ) A_ : int = norm_layer(lowercase ) if norm_layer else nn.Identity() def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Optional[Any] = self.projection(lowercase ) A_ : Optional[int] = self.norm(lowercase ) return embeddings class UpperCAmelCase ( nn.GroupNorm ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(1 , lowercase , **lowercase ) class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , lowercase ): """simple docstring""" super().__init__() A_ : List[Any] = nn.AvgPoolad(lowercase , stride=1 , padding=pool_size // 2 , count_include_pad=lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return self.pool(lowercase ) - hidden_states class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" super().__init__() A_ : Union[str, Any] = nn.Convad(lowercase , lowercase , 1 ) A_ : Tuple = nn.Convad(lowercase , lowercase , 1 
) A_ : List[str] = PoolFormerDropPath(lowercase ) if isinstance(config.hidden_act , lowercase ): A_ : Optional[int] = ACTaFN[config.hidden_act] else: A_ : Dict = config.hidden_act def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = self.conva(lowercase ) A_ : List[str] = self.act_fn(lowercase ) A_ : List[str] = self.drop(lowercase ) A_ : Any = self.conva(lowercase ) A_ : Union[str, Any] = self.drop(lowercase ) return hidden_states class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" super().__init__() A_ : List[str] = PoolFormerPooling(lowercase ) A_ : Dict = PoolFormerOutput(lowercase , lowercase , lowercase , lowercase ) A_ : List[str] = PoolFormerGroupNorm(lowercase ) A_ : Dict = PoolFormerGroupNorm(lowercase ) # Useful for training neural nets A_ : Union[str, Any] = PoolFormerDropPath(lowercase ) if drop_path > 0.0 else nn.Identity() A_ : str = config.use_layer_scale if config.use_layer_scale: A_ : str = nn.Parameter( config.layer_scale_init_value * torch.ones((lowercase) ) , requires_grad=lowercase ) A_ : Any = nn.Parameter( config.layer_scale_init_value * torch.ones((lowercase) ) , requires_grad=lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if self.use_layer_scale: A_ : Optional[int] = self.pooling(self.before_norm(lowercase ) ) A_ : Optional[int] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection A_ : List[Any] = hidden_states + self.drop_path(lowercase ) A_ : Dict = () A_ : Union[str, Any] = self.output(self.after_norm(lowercase ) ) A_ : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection A_ : Any = hidden_states + self.drop_path(lowercase ) A_ : Optional[int] = (output,) + outputs return outputs else: A_ : str = self.drop_path(self.pooling(self.before_norm(lowercase ) ) ) # First residual connection A_ : Union[str, Any] = pooling_output + hidden_states A_ : List[str] = () # Second residual connection inside the PoolFormerOutput block A_ : Optional[int] = self.drop_path(self.output(self.after_norm(lowercase ) ) ) A_ : Any = hidden_states + layer_output A_ : Optional[int] = (output,) + outputs return outputs class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , lowercase ): """simple docstring""" super().__init__() A_ : Any = config # stochastic depth decay rule A_ : Union[str, Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings A_ : List[Any] = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) A_ : List[Any] = nn.ModuleList(lowercase ) # Transformer blocks A_ : str = [] A_ : int = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers A_ : int = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( lowercase , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(lowercase ) ) A_ : Tuple = nn.ModuleList(lowercase ) def 
lowerCAmelCase_ ( self , lowercase , lowercase=False , lowercase=True ): """simple docstring""" A_ : Optional[Any] = () if output_hidden_states else None A_ : Tuple = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): A_ : List[Any] = layers # Get patch embeddings from hidden_states A_ : Union[str, Any] = embedding_layer(lowercase ) # Send the embeddings through the blocks for _, blk in enumerate(lowercase ): A_ : List[Any] = blk(lowercase ) A_ : Tuple = layer_outputs[0] if output_hidden_states: A_ : Union[str, Any] = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = PoolFormerConfig lowerCamelCase_ = '''poolformer''' lowerCamelCase_ = '''pixel_values''' lowerCamelCase_ = True def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if isinstance(lowercase , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(lowercase , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def lowerCAmelCase_ ( self , lowercase , lowercase=False ): """simple docstring""" if isinstance(lowercase , lowercase ): A_ : Dict = value _UpperCAmelCase = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. 
""" @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase ): """simple docstring""" super().__init__(lowercase ) A_ : str = config A_ : Dict = PoolFormerEncoder(lowercase ) # Initialize weights and apply final processing self.post_init() def lowerCAmelCase_ ( self ): """simple docstring""" return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , ): """simple docstring""" A_ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Any = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) A_ : List[str] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , ) A_ : Optional[int] = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=lowercase , hidden_states=encoder_outputs.hidden_states , ) class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , lowercase ): """simple docstring""" super().__init__() A_ : Union[str, Any] = nn.Linear(config.hidden_size , config.hidden_size ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[Any] = self.dense(lowercase ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase ): """simple docstring""" super().__init__(lowercase ) A_ : Union[str, Any] = config.num_labels A_ : List[Any] = PoolFormerModel(lowercase ) # Final norm A_ : Any = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head A_ : List[str] = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , ): """simple docstring""" A_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict A_ : str = self.poolformer( lowercase , output_hidden_states=lowercase , return_dict=lowercase , ) A_ : List[str] = outputs[0] A_ : Dict = self.classifier(self.norm(lowercase ).mean([-2, -1] ) ) A_ : int = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: A_ : Any = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): A_ : List[Any] = 'single_label_classification' else: A_ : Union[str, Any] = 'multi_label_classification' if self.config.problem_type == "regression": A_ : int = MSELoss() if self.num_labels == 1: A_ : str = loss_fct(logits.squeeze() , labels.squeeze() ) else: A_ : str = loss_fct(lowercase , lowercase ) elif 
self.config.problem_type == "single_label_classification": A_ : Dict = CrossEntropyLoss() A_ : List[str] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": A_ : List[Any] = BCEWithLogitsLoss() A_ : Optional[int] = loss_fct(lowercase , lowercase ) if not return_dict: A_ : Optional[Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
704
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) A_ : List[Any] = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) ) self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) A_ : str = get_activation('gelu' ) A_ : int = get_activation('gelu_10' ) A_ : Optional[int] = torch_builtin(lowercase ) A_ : Tuple = geluaa(lowercase ) A_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(lowercase ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase_ ( self ): """simple docstring""" get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(lowercase ): get_activation('bogus' ) with self.assertRaises(lowercase ): get_activation(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = get_activation('gelu' ) A_ : List[str] = 1 A_ : Optional[Any] = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(lowercase ): A_ : str = acta.a
70
0
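# Editor's note: the `drop_path` helper at the top of the PoolFormer row
# implements stochastic depth: whole samples are zeroed with probability
# drop_prob and survivors are rescaled so the expectation is unchanged. A
# standalone sketch with a quick sanity check of that rescaling:
import torch

def drop_path_sketch(x, drop_prob, training=True):
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1.0 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)   # one Bernoulli draw per sample
    mask = torch.floor(keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device))
    return x / keep_prob * mask                   # rescale so E[output] == x

torch.manual_seed(0)
out = drop_path_sketch(torch.ones(10_000, 4), drop_prob=0.3)
assert out.unique().numel() <= 2                  # each sample kept whole or zeroed
assert abs(out.mean().item() - 1.0) < 0.05        # expectation preserved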
import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def UpperCamelCase ( ): '''simple docstring''' print('Making key files...' ) make_key_files('rsa' ,10_24 ) print('Key files generation successful.' ) def UpperCamelCase ( __lowercase : int ): '''simple docstring''' print('Generating prime p...' ) A_ : List[str] = rabinMiller.generate_large_prime(__lowercase ) print('Generating prime q...' ) A_ : List[str] = rabinMiller.generate_large_prime(__lowercase ) A_ : Optional[Any] = p * q print('Generating e that is relatively prime to (p - 1) * (q - 1)...' ) while True: A_ : Optional[int] = random.randrange(2 ** (key_size - 1) ,2 ** (key_size) ) if cryptoMath.gcd(__lowercase ,(p - 1) * (q - 1) ) == 1: break print('Calculating d that is mod inverse of e...' ) A_ : Optional[int] = cryptoMath.find_mod_inverse(__lowercase ,(p - 1) * (q - 1) ) A_ : Optional[Any] = (n, e) A_ : Optional[Any] = (n, d) return (public_key, private_key) def UpperCamelCase ( __lowercase : str ,__lowercase : int ): '''simple docstring''' if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ): print('\nWARNING:' ) print( f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n''' 'Use a different name or delete these files and re-run this program.' ) sys.exit() A_ : List[str] = generate_key(__lowercase ) print(f'''\nWriting public key to file {name}_pubkey.txt...''' ) with open(f'''{name}_pubkey.txt''' ,'w' ) as out_file: out_file.write(f'''{key_size},{public_key[0]},{public_key[1]}''' ) print(f'''Writing private key to file {name}_privkey.txt...''' ) with open(f'''{name}_privkey.txt''' ,'w' ) as out_file: out_file.write(f'''{key_size},{private_key[0]},{private_key[1]}''' ) if __name__ == "__main__": main()
705
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCAmelCase = logging.get_logger(__name__) # General docstring _UpperCAmelCase = """RegNetConfig""" # Base docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = [1, 1088, 7, 7] # Image classification docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = """tabby, tabby cat""" _UpperCAmelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , ) A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[str] = self.convolution(self.padding(lowercase ) ) A_ : List[str] = self.normalization(lowercase ) A_ : List[Any] = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[int] = config.num_channels A_ : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = shape_list(lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) ) A_ : Optional[int] = self.embedder(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' ) A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" return self.normalization(self.convolution(lowercase ) , training=lowercase ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) A_ : Optional[Any] = [ tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = self.pooler(lowercase ) for layer_module in self.attention: A_ : Optional[Any] = layer_module(lowercase ) A_ : Optional[int] = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : Optional[int] = max(1 , out_channels // config.groups_width ) A_ : List[Any] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
A_ : Optional[int] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ), ] A_ : List[str] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = hidden_state for layer_module in self.layers: A_ : int = layer_module(lowercase ) A_ : Union[str, Any] = self.shortcut(lowercase ) hidden_state += residual A_ : Dict = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : int = max(1 , out_channels // config.groups_width ) A_ : Optional[int] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) A_ : List[str] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ), ] A_ : Union[str, Any] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = hidden_state for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) A_ : int = self.shortcut(lowercase ) hidden_state += residual A_ : str = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer A_ : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ), *[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : List[str] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) ) def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ): """simple docstring""" A_ : Tuple = () if 
output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A_ : Dict = hidden_states + (hidden_state,) A_ : List[Any] = stage_module(lowercase ) if output_hidden_states: A_ : Union[str, Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' lowerCamelCase_ = RegNetConfig def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[Any] = config A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' ) A_ : str = TFRegNetEncoder(lowercase , name='encoder' ) A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" A_ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase ) A_ : Optional[int] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Dict = encoder_outputs[0] A_ : List[Any] = self.pooler(lowercase ) # Change to NCHW output format have uniformity in the modules A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = RegNetConfig lowerCamelCase_ = '''regnet''' lowerCamelCase_ = '''pixel_values''' @property def lowerCAmelCase_ ( self ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _UpperCAmelCase = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : int = TFRegNetMainLayer(lowercase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : Tuple = self.regnet( pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , __A , ) class UpperCAmelCase ( __A , __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : List[Any] = config.num_labels A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' ) # classification head A_ : Union[str, Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : List[Any] = self.regnet( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] A_ : List[Any] = self.classifier[0](lowercase ) A_ : Union[str, Any] = self.classifier[1](lowercase ) A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase ) if not return_dict: A_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
70
0
import os
import sys
import tempfile

import torch

from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment


def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"  # allow CPU fallback for ops not yet supported on MPS
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)


def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
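A hypothetical usage sketch for the launcher above; the training function, its argument, and the process count are made up for illustration.

# Assumed example: spawn 2 processes running `train` from a notebook cell.
def train(learning_rate=1e-3):  # hypothetical training function
    print(f"Training with learning_rate={learning_rate}")

notebook_launcher(train, args=(1e-3,), num_processes=2)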
706
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCAmelCase = { """configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""], """tokenization_biogpt""": ["""BioGptTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase = [ """BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BioGptForCausalLM""", """BioGptForTokenClassification""", """BioGptForSequenceClassification""", """BioGptModel""", """BioGptPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys _UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
70
0
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
707
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        # alternate between odd- and even-indexed adjacent pairs
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]

    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
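A quick self-check of the sort against Python's built-in sorted(), illustrative only:

import random

data = [random.randint(0, 99) for _ in range(20)]
assert odd_even_transposition(list(data)) == sorted(data)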
70
0
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = BertTokenizer lowerCamelCase_ = BertTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = True lowerCamelCase_ = filter_non_english def lowerCAmelCase_ ( self ): """simple docstring""" super().setUp() A_ : int = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] A_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = 'UNwant\u00E9d,running' A_ : Optional[int] = 'unwanted, running' return input_text, output_text def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = self.tokenizer_class(self.vocab_file ) A_ : int = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(lowercase , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , [9, 6, 7, 1_2, 1_0, 1_1] ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : List[str] = self.get_tokenizer() A_ : Optional[Any] = self.get_rust_tokenizer() A_ : Optional[Any] = 'UNwant\u00E9d,running' A_ : str = tokenizer.tokenize(lowercase ) A_ : Dict = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : int = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A_ : Dict = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : Tuple = self.get_rust_tokenizer() A_ : Optional[int] = tokenizer.encode(lowercase ) A_ : Any = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) # With lower casing A_ : Union[str, Any] = self.get_tokenizer(do_lower_case=lowercase ) A_ : Tuple = self.get_rust_tokenizer(do_lower_case=lowercase ) A_ : List[Any] = 'UNwant\u00E9d,running' A_ : Optional[int] = tokenizer.tokenize(lowercase ) A_ : Union[str, Any] = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A_ : List[Any] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : str = self.get_rust_tokenizer() A_ : Tuple = tokenizer.encode(lowercase ) A_ : Union[str, Any] = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = BasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = BasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = BasicTokenizer(do_lower_case=lowercase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = BasicTokenizer(do_lower_case=lowercase , strip_accents=lowercase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = BasicTokenizer(do_lower_case=lowercase , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = BasicTokenizer() A_ : int = 'a\n\'ll !!to?\'d of, can\'t.' 
A_ : Any = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.'] self.assertListEqual(tokenizer.tokenize(lowercase ) , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] A_ : Optional[Any] = {} for i, token in enumerate(lowercase ): A_ : Dict = i A_ : Any = WordpieceTokenizer(vocab=lowercase , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) def lowerCAmelCase_ ( self ): """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def lowerCAmelCase_ ( self ): """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def lowerCAmelCase_ ( self ): """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = self.get_tokenizer() A_ : Any = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowercase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(lowercase ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = self.tokenizer_class.from_pretrained('bert-base-uncased' ) A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase ) A_ : int = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase ) A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase ) A_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) assert encoded_sentence == [1_0_1] + text + [1_0_2] assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2] def lowerCAmelCase_ ( self ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ : int = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) A_ : Optional[int] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' A_ : Union[str, Any] = tokenizer_r.encode_plus( lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase , ) A_ : List[Any] = tokenizer_r.do_lower_case if hasattr(lowercase , 'do_lower_case' ) else False A_ : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), ','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), 'Allen'), ((2_1, 2_3), '##NL'), ((2_3, 2_4), '##P'), ((2_5, 3_3), 'sentence'), ((3_3, 3_4), 
'.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 1_5), tokenizer_r.mask_token), ((1_6, 2_1), 'allen'), ((2_1, 2_3), '##nl'), ((2_3, 2_4), '##p'), ((2_5, 3_3), 'sentence'), ((3_3, 3_4), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = ['的', '人', '有'] A_ : int = ''.join(lowercase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ : int = True A_ : List[str] = self.tokenizer_class.from_pretrained(lowercase , **lowercase ) A_ : Tuple = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) A_ : Any = tokenizer_p.encode(lowercase , add_special_tokens=lowercase ) A_ : int = tokenizer_r.encode(lowercase , add_special_tokens=lowercase ) A_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(lowercase ) A_ : Any = tokenizer_p.convert_ids_to_tokens(lowercase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : str = False A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) A_ : List[Any] = self.tokenizer_class.from_pretrained(lowercase , **lowercase ) A_ : Union[str, Any] = tokenizer_r.encode(lowercase , add_special_tokens=lowercase ) A_ : Tuple = tokenizer_p.encode(lowercase , add_special_tokens=lowercase ) A_ : Any = tokenizer_r.convert_ids_to_tokens(lowercase ) A_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(lowercase ) # it is expected that only the first Chinese character is not preceded by "##". A_ : Dict = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(lowercase ) ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , lowercase )
708
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''wavlm''' def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): """simple docstring""" super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) A_ : List[Any] = hidden_size A_ : Tuple = feat_extract_norm A_ : Dict = feat_extract_activation A_ : Optional[Any] = list(lowercase ) A_ : Union[str, Any] = list(lowercase ) A_ : List[str] = list(lowercase ) A_ : str = conv_bias A_ : Tuple = num_buckets A_ : Union[str, Any] = max_bucket_distance A_ : int = num_conv_pos_embeddings A_ : str = num_conv_pos_embedding_groups A_ : str = len(self.conv_dim ) A_ : Tuple = num_hidden_layers A_ : Tuple = intermediate_size A_ : Optional[Any] = hidden_act A_ : Optional[Any] = num_attention_heads A_ : str = hidden_dropout A_ : Optional[int] = attention_dropout A_ : Optional[Any] = activation_dropout A_ : Optional[int] = feat_proj_dropout A_ : List[Any] = final_dropout A_ : Union[str, Any] = layerdrop A_ : Dict = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : str = num_ctc_classes A_ : Any = vocab_size A_ : str = do_stable_layer_norm A_ : int = use_weighted_layer_sum A_ : int = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : List[str] = apply_spec_augment A_ : Optional[Any] = mask_time_prob A_ : int = mask_time_length A_ : Any = mask_time_min_masks A_ : Optional[int] = mask_feature_prob A_ : Tuple = mask_feature_length # parameters for pretraining with codevector quantized representations A_ : int = num_codevectors_per_group A_ : Any = num_codevector_groups A_ : List[Any] = contrastive_logits_temperature A_ : Optional[Any] = num_negatives A_ : Optional[Any] = codevector_dim A_ : int = proj_codevector_dim A_ : int = diversity_loss_weight # ctc loss A_ : Union[str, Any] = ctc_loss_reduction A_ : Any = ctc_zero_infinity # adapter A_ : int = add_adapter A_ : Optional[Any] = adapter_kernel_size A_ : Optional[int] = adapter_stride A_ : Dict = num_adapter_layers A_ : str = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A_ : int = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A_ : Tuple = list(lowercase ) A_ : Optional[Any] = list(lowercase ) A_ : Dict = list(lowercase ) A_ : Dict = xvector_output_dim @property def lowerCAmelCase_ ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
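As a rough illustration of the downsampling that the final property computes (upstream this property is called `inputs_to_logits_ratio`; the 16 kHz sampling rate below is an assumption, not part of the config):

# Sketch: feature-extractor downsampling implied by the default conv_stride.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the config above
print(functools.reduce(operator.mul, conv_stride, 1))  # 320 -> one frame per 320 samples (20 ms at an assumed 16 kHz)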
70
0
from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
709
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger() def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ): '''simple docstring''' print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase ) else: A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase ) if hidden_sizes == 1_92: A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase ) if hidden_sizes == 2_56: A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase ) if hidden_sizes == 3_84: A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase ) from_model.eval() A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval() A_ : Union[str, Any] = OrderedDict() A_ : Dict = from_model.state_dict() A_ : Tuple = list(from_model.state_dict().keys() ) A_ : str = list(our_model.state_dict().keys() ) print(len(__lowercase ) ,len(__lowercase ) ) for i in range(len(__lowercase ) ): A_ : str = weights[og_keys[i]] our_model.load_state_dict(__lowercase ) A_ : str = torch.randn((2, 3, 2_24, 2_24) ) A_ : str = from_model(__lowercase ) A_ : Optional[Any] = our_model(__lowercase ).logits assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one." A_ : List[str] = name print(__lowercase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) A_ : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ): '''simple docstring''' A_ : Dict = 'imagenet-1k-id2label.json' A_ : Optional[int] = 10_00 A_ : Optional[int] = (1, num_labels) A_ : int = 'huggingface/label-files' A_ : int = num_labels A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) ) A_ : int = {int(__lowercase ): v for k, v in idalabel.items()} A_ : List[str] = idalabel A_ : str = {v: k for k, v in idalabel.items()} A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase ) A_ : Any = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } A_ : Tuple = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 
32, 32] ,drop_path_rate=0.1 ,), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
70
0
from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class UpperCAmelCase : '''simple docstring''' lowerCamelCase_ = BlenderbotConfig lowerCamelCase_ = {} lowerCamelCase_ = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , ): """simple docstring""" A_ : Any = parent A_ : Any = batch_size A_ : Tuple = seq_length A_ : Dict = is_training A_ : str = use_labels A_ : Optional[int] = vocab_size A_ : List[str] = hidden_size A_ : List[str] = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : str = intermediate_size A_ : int = hidden_dropout_prob A_ : str = attention_probs_dropout_prob A_ : List[Any] = max_position_embeddings A_ : Optional[Any] = eos_token_id A_ : Tuple = pad_token_id A_ : Union[str, Any] = bos_token_id def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A_ : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 ) A_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) A_ : Optional[int] = prepare_blenderbot_inputs_dict(lowercase , lowercase , lowercase ) return config, inputs_dict def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : Optional[int] = TFBlenderbotModel(config=lowercase ).get_decoder() A_ : Union[str, Any] = inputs_dict['input_ids'] A_ : Any = input_ids[:1, :] A_ : Tuple = inputs_dict['attention_mask'][:1, :] A_ : int = inputs_dict['head_mask'] A_ : Union[str, Any] = 1 # first forward pass A_ : Union[str, Any] = model(lowercase , attention_mask=lowercase , head_mask=lowercase , use_cache=lowercase ) A_ : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A_ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A_ : Any = tf.concat([input_ids, next_tokens] , axis=-1 ) A_ : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A_ : int = model(lowercase , 
attention_mask=lowercase )[0] A_ : int = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A_ : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A_ : int = output_from_no_past[:, -3:, random_slice_idx] A_ : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def UpperCamelCase ( __lowercase : str ,__lowercase : List[Any] ,__lowercase : Optional[int] ,__lowercase : Any=None ,__lowercase : str=None ,__lowercase : Optional[int]=None ,__lowercase : Any=None ,__lowercase : int=None ,): '''simple docstring''' if attention_mask is None: A_ : Optional[int] = tf.cast(tf.math.not_equal(__lowercase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: A_ : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: A_ : Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A_ : List[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: A_ : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () lowerCamelCase_ = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () lowerCamelCase_ = ( { '''conversational''': TFBlenderbotForConditionalGeneration, '''feature-extraction''': TFBlenderbotModel, '''summarization''': TFBlenderbotForConditionalGeneration, '''text2text-generation''': TFBlenderbotForConditionalGeneration, '''translation''': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = TFBlenderbotModelTester(self ) A_ : Tuple = ConfigTester(self , config_class=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) @require_tokenizers @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = ['''My friends are cool but they eat too many carbs.'''] lowerCamelCase_ = '''facebook/blenderbot-400M-distill''' @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.tokenizer(self.src_text , return_tensors='tf' ) A_ : List[Any] = self.model.generate( model_inputs.input_ids , ) A_ : Tuple = 
self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
710
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
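A worked example of the justification behavior, assuming the renamed text_justification entry point above:

print(text_justification("This is an example of text justification.", 16))
# -> ['This    is    an', 'example  of text', 'justification.  ']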
70
0
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = tempfile.mkdtemp() A_ : Dict = BlipImageProcessor() A_ : int = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) A_ : List[Any] = BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert' ) A_ : Any = InstructBlipProcessor(lowercase , lowercase , lowercase ) processor.save_pretrained(self.tmpdirname ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer def lowerCAmelCase_ ( self ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] A_ : Tuple = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs] return image_inputs def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) processor.save_pretrained(self.tmpdirname ) A_ : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) A_ : Optional[int] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 ) A_ : Dict = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=lowercase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase ) self.assertIsInstance(processor.qformer_tokenizer , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : Optional[int] = self.get_qformer_tokenizer() A_ : List[Any] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Any = self.prepare_image_inputs() A_ : List[Any] = image_processor(lowercase , return_tensors='np' ) A_ : str = processor(images=lowercase , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = self.get_image_processor() A_ : Dict = self.get_tokenizer() A_ : List[Any] = self.get_qformer_tokenizer() A_ : Optional[Any] = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : List[Any] = 
'lower newer' A_ : Optional[Any] = processor(text=lowercase ) A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase ) A_ : List[str] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['qformer_' + key] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.get_image_processor() A_ : Any = self.get_tokenizer() A_ : Any = self.get_qformer_tokenizer() A_ : Tuple = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : str = 'lower newer' A_ : int = self.prepare_image_inputs() A_ : Any = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , ) # test if it raises when no input is passed with pytest.raises(lowercase ): processor() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = self.get_image_processor() A_ : List[Any] = self.get_tokenizer() A_ : Tuple = self.get_qformer_tokenizer() A_ : Any = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(lowercase ) A_ : List[Any] = tokenizer.batch_decode(lowercase ) self.assertListEqual(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = self.get_image_processor() A_ : List[str] = self.get_tokenizer() A_ : Any = self.get_qformer_tokenizer() A_ : Any = InstructBlipProcessor( tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase ) A_ : Union[str, Any] = 'lower newer' A_ : str = self.prepare_image_inputs() A_ : Tuple = processor(text=lowercase , images=lowercase ) self.assertListEqual( list(inputs.keys() ) , ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'] , )
711
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa _UpperCAmelCase = logging.getLogger(__name__) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''summarization''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ROUGE_KEYS lowerCamelCase_ = '''rouge2''' def __init__( self , lowercase , **lowercase ): """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: A_ : str = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) A_ : List[str] = Path(self.output_dir ) / 'metrics.json' A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) A_ : str = 0 A_ : Any = defaultdict(lowercase ) A_ : Union[str, Any] = self.config.model_type A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size A_ : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } A_ : Optional[Any] = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} A_ : Tuple = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) A_ : int = get_git_info()['repo_sha'] A_ : int = hparams.num_workers A_ : Union[str, Any] = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ): A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang] A_ : Any = self.decoder_start_token_id A_ : str = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset ) A_ : Union[str, Any] = False A_ : Tuple = self.model.config.num_beams 
if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: A_ : int = self.hparams.eval_max_gen_length else: A_ : List[Any] = self.model.config.max_length A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) A_ : int = True return readable_batch def lowerCAmelCase_ ( self , lowercase , **lowercase ): """simple docstring""" return self.model(lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[Any] = self.tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) return lmap(str.strip , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = self.tokenizer.pad_token_id A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask'] A_ : str = batch['labels'] if isinstance(self.model , lowercase ): A_ : Optional[int] = self.model._shift_right(lowercase ) else: A_ : Any = shift_tokens_right(lowercase , lowercase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero A_ : Optional[Any] = decoder_input_ids self.save_readable_batch(lowercase ) A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase ) A_ : Dict = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase ) assert lm_logits.shape[-1] == self.vocab_size A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 ) A_ , A_ : Any = label_smoothed_nll_loss( lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase ) return (loss,) @property def lowerCAmelCase_ ( self ): """simple docstring""" return self.tokenizer.pad_token_id def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : str = self._step(lowercase ) A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) ) # tokens per batch A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() A_ : str = batch['input_ids'].shape[0] A_ : Any = batch['input_ids'].eq(self.pad ).sum() A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase="val" ): """simple docstring""" self.step_count += 1 A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} A_ : Dict = losses['loss'] A_ : int = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len'] } A_ : Any = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase ) 
generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase ) A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} A_ : Tuple = self.step_count self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path A_ : Dict = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_rouge(lowercase , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') A_ : Optional[int] = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) A_ : int = (time.time() - ta) / batch['input_ids'].shape[0] A_ : List[str] = self.ids_to_clean_text(lowercase ) A_ : List[str] = self.ids_to_clean_text(batch['labels'] ) A_ : List[Any] = self._step(lowercase ) A_ : int = dict(zip(self.loss_names , lowercase ) ) A_ : Dict = self.calc_generative_metrics(lowercase , lowercase ) A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) ) base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase ) return base_metrics def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return self.validation_epoch_end(lowercase , prefix='test' ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = self.n_obs[type_path] A_ : List[Any] = self.target_lens[type_path] A_ : str = self.dataset_class( self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , ) return dataset def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ): """simple docstring""" A_ : Optional[int] = self.get_dataset(lowercase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": A_ : str = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase ) return dataloader def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def lowerCAmelCase_ ( lowercase , 
lowercase ): """simple docstring""" BaseTransformer.add_model_specific_args(lowercase , lowercase ) add_generic_args(lowercase , lowercase ) parser.add_argument( '--max_source_length' , default=1_0_2_4 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=5_6 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase ) parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase ) parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=lowercase , default='summarization' , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase ) parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase ) parser.add_argument( '--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=( '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' 
) , ) return parser class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''translation''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ['''bleu'''] lowerCamelCase_ = '''bleu''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , **lowercase ) A_ : List[Any] = hparams.src_lang A_ : str = hparams.tgt_lang def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_bleu(lowercase , lowercase ) def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=__lowercase ) check_output_dir(__lowercase ,expected_items=3 ) if model is None: if "summarization" in args.task: A_ : SummarizationModule = SummarizationModule(__lowercase ) else: A_ : SummarizationModule = TranslationModule(__lowercase ) A_ : Optional[int] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('/tmp' ) or str(args.output_dir ).startswith('/var' ) ): A_ : List[str] = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase ) A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience ) else: A_ : str = False A_ : Dict = args.val_metric == 'loss' A_ : pl.Trainer = generic_train( __lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback( args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,) pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' ) if not args.do_predict: return model A_ : Optional[Any] = '' A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) ) if checkpoints: A_ : List[Any] = checkpoints[-1] A_ : Any = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() _UpperCAmelCase = pl.Trainer.add_argparse_args(parser) _UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd()) _UpperCAmelCase = parser.parse_args() main(args)
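# Illustrative invocation of the fine-tuning script above (the script file
# name and the --model_name_or_path flag come from the surrounding repo and
# are assumptions here; the remaining flags are defined by
# add_model_specific_args / add_generic_args):
#
#   python finetune.py \
#       --data_dir ./cnn_dm \
#       --output_dir ./summarization_output \
#       --model_name_or_path t5-small \
#       --task summarization \
#       --gpus 1 --n_val 500 --do_predict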
70
0
import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = GPTaTokenizer lowerCamelCase_ = GPTaTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = {'''add_prefix_space''': True} lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A_ : Tuple = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] A_ : Dict = dict(zip(lowercase , range(len(lowercase ) ) ) ) A_ : Optional[int] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A_ : List[str] = {'unk_token': '<unk>'} A_ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(lowercase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(lowercase ) ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **lowercase ) def lowerCAmelCase_ ( self , **lowercase ): """simple docstring""" kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Optional[Any] = 'lower newer' A_ : Optional[int] = 'lower newer' return input_text, output_text def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) A_ : Any = 'lower newer' A_ : Optional[int] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] A_ : Optional[int] = tokenizer.tokenize(lowercase , add_prefix_space=lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : List[Any] = tokens + [tokenizer.unk_token] A_ : Any = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : List[Any] = self.get_tokenizer() A_ : Dict = self.get_rust_tokenizer(add_prefix_space=lowercase ) A_ : Dict = 'lower newer' # Testing tokenization A_ : int = tokenizer.tokenize(lowercase , add_prefix_space=lowercase ) A_ : List[str] = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing conversion to ids without special tokens A_ : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase ) A_ : Tuple = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing conversion to ids with special tokens A_ : Any = self.get_rust_tokenizer(add_prefix_space=lowercase ) A_ : Any = tokenizer.encode(lowercase , add_prefix_space=lowercase ) A_ : int = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) # Testing the unknown token A_ : Union[str, Any] = tokens + 
[rust_tokenizer.unk_token] A_ : int = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(lowercase ) , lowercase ) def lowerCAmelCase_ ( self , *lowercase , **lowercase ): """simple docstring""" pass def lowerCAmelCase_ ( self , lowercase=1_5 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): A_ : Any = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase ) # Simple input A_ : List[str] = 'This is a simple input' A_ : List[str] = ['This is a simple input 1', 'This is a simple input 2'] A_ : Optional[int] = ('This is a simple input', 'This is a pair') A_ : List[Any] = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' ) # Simple input self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' ) # Simple input self.assertRaises( lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , ) # Pair input self.assertRaises(lowercase , tokenizer_r.encode , lowercase , max_length=lowercase , padding='max_length' ) # Pair input self.assertRaises(lowercase , tokenizer_r.encode_plus , lowercase , max_length=lowercase , padding='max_length' ) # Pair input self.assertRaises( lowercase , tokenizer_r.batch_encode_plus , lowercase , max_length=lowercase , padding='max_length' , ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input A_ : Any = 'This is a simple input' A_ : Dict = ['This is a simple input looooooooong', 'This is a simple input'] A_ : Optional[Any] = ('This is a simple input', 'This is a pair') A_ : Optional[Any] = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] A_ : Any = tokenizer.pad_token_id A_ : Optional[Any] = tokenizer(lowercase , padding='max_length' , max_length=3_0 , return_tensors='np' ) A_ : List[str] = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' ) A_ : Optional[int] = tokenizer(*lowercase , padding='max_length' , max_length=6_0 , return_tensors='np' ) A_ : List[Any] = tokenizer(lowercase , padding=lowercase , truncate=lowercase , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 
in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = '$$$' A_ : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=lowercase , add_bos_token=lowercase ) A_ : Dict = 'This is a simple input' A_ : List[Any] = ['This is a simple input 1', 'This is a simple input 2'] A_ : List[Any] = tokenizer.bos_token_id A_ : List[Any] = tokenizer(lowercase ) A_ : Optional[int] = tokenizer(lowercase ) self.assertEqual(out_s.input_ids[0] , lowercase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) A_ : Dict = tokenizer.decode(out_s.input_ids ) A_ : List[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , lowercase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = [self.get_tokenizer(do_lower_case=lowercase , add_bos_token=lowercase )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): A_ : Dict = 'Encode this.' A_ : Any = 'This one too please.' A_ : str = tokenizer.encode(lowercase , add_special_tokens=lowercase ) encoded_sequence += tokenizer.encode(lowercase , add_special_tokens=lowercase ) A_ : Optional[int] = tokenizer.encode_plus( lowercase , lowercase , add_special_tokens=lowercase , return_special_tokens_mask=lowercase , ) A_ : Union[str, Any] = encoded_sequence_dict['input_ids'] A_ : Any = encoded_sequence_dict['special_tokens_mask'] self.assertEqual(len(lowercase ) , len(lowercase ) ) A_ : Optional[int] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(lowercase ) ] A_ : Optional[int] = [x for x in filtered_sequence if x is not None] self.assertEqual(lowercase , lowercase ) @require_tokenizers class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowercase ) A_ : Union[str, Any] = 'A photo of a cat' A_ : Optional[int] = tokenizer.encode( lowercase , ) self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('test_opt' ) A_ : List[str] = AutoTokenizer.from_pretrained('./test_opt' ) A_ : Union[str, Any] = tokenizer.encode( lowercase , ) self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=lowercase ) A_ : Tuple = 'A photo of a cat' A_ : Dict = tokenizer.encode( lowercase , ) # Same as above self.assertEqual(lowercase , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=lowercase ) A_ : Optional[Any] = 'bos' A_ : List[Any] = tokenizer.get_vocab()['bos'] A_ : Any = 'A photo of a cat' A_ : int = tokenizer.encode( lowercase , ) # We changed the bos token self.assertEqual(lowercase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('./tok' ) A_ : Union[str, Any] = AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) A_ : Optional[int] = tokenizer.encode( lowercase , ) 
self.assertEqual(lowercase , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
712
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ): """simple docstring""" A_ : List[Any] = parent A_ : Optional[Any] = batch_size A_ : Dict = image_size A_ : str = num_channels A_ : Union[str, Any] = embeddings_size A_ : Optional[Any] = hidden_sizes A_ : Any = depths A_ : List[str] = is_training A_ : int = use_labels A_ : Optional[Any] = hidden_act A_ : List[Any] = num_labels A_ : Optional[int] = scope A_ : int = len(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) A_ : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Any = TFRegNetModel(config=lowercase ) A_ : Optional[Any] = model(lowercase , training=lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : int = self.num_labels A_ : Tuple = TFRegNetForImageClassification(lowercase ) A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : List[Any] = config_and_inputs A_ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase_ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = TFRegNetModelTester(self ) A_ : List[Any] = ConfigTester(self , 
config_class=lowercase , has_text_modality=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(lowercase ) A_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Optional[Any] = [*signature.parameters.keys()] A_ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" def check_hidden_states_output(lowercase , lowercase , lowercase ): A_ : List[Any] = model_class(lowercase ) A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase ) A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowercase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[Any] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : int = layer_type A_ : Tuple = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : Any = True check_hidden_states_output(lowercase , lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ): A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase ) A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple() def recursive_check(lowercase , lowercase ): if isinstance(lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ): recursive_check(lowercase , lowercase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowercase , lowercase ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(lowercase , lowercase ) for model_class in self.all_model_classes: A_ : Dict = model_class(lowercase ) A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase ) A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : Any = self._prepare_for_class(lowercase , lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def UpperCamelCase ( ): '''simple docstring''' A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A_ : int = self.default_image_processor A_ : List[str] = prepare_img() A_ : Any = image_processor(images=lowercase , return_tensors='tf' ) # forward pass A_ : Tuple = model(**lowercase , training=lowercase ) # verify the logits A_ : int = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase ) A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
70
0
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """microsoft/unispeech-large-1500h-cv""": ( """https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json""" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''unispeech''' def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=0.5 , **lowercase , ): """simple docstring""" super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) A_ : Dict = hidden_size A_ : Dict = feat_extract_norm A_ : List[Any] = feat_extract_activation A_ : Optional[int] = list(lowercase ) A_ : Any = list(lowercase ) A_ : Optional[int] = list(lowercase ) A_ : str = conv_bias A_ : Any = num_conv_pos_embeddings A_ : List[str] = num_conv_pos_embedding_groups A_ : List[Any] = len(self.conv_dim ) A_ : List[str] = num_hidden_layers A_ : List[str] = intermediate_size A_ : str = hidden_act A_ : int = num_attention_heads A_ : str = hidden_dropout A_ : List[str] = attention_dropout A_ : Tuple = activation_dropout A_ : Optional[Any] = feat_proj_dropout A_ : Optional[Any] = final_dropout A_ : Union[str, Any] = layerdrop A_ : int = layer_norm_eps A_ : int = initializer_range A_ : Any = num_ctc_classes A_ : Optional[int] = vocab_size A_ : Optional[Any] = do_stable_layer_norm A_ : Union[str, Any] = use_weighted_layer_sum A_ : Any = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : List[str] = apply_spec_augment A_ : List[str] = mask_time_prob A_ : List[Any] = mask_time_length A_ : Any = mask_time_min_masks A_ : Union[str, Any] = mask_feature_prob A_ : Any = mask_feature_length A_ : Any = mask_feature_min_masks # parameters for pretraining with codevector quantized representations A_ : Any = num_codevectors_per_group A_ : Dict = num_codevector_groups A_ : Union[str, Any] = contrastive_logits_temperature A_ : Tuple = feat_quantizer_dropout A_ : Optional[int] = num_negatives A_ : Union[str, Any] = codevector_dim A_ : Tuple = proj_codevector_dim A_ : Union[str, Any] = diversity_loss_weight # ctc loss A_ : Any = ctc_loss_reduction A_ : int = ctc_zero_infinity # pretraining loss A_ : Optional[int] = replace_prob @property def lowerCAmelCase_ ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
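# Worked example for the property at the end of the config above (named
# inputs_to_logits_ratio in the upstream class): with the default
# conv_stride of (5, 2, 2, 2, 2, 2, 2), the feature extractor downsamples
# raw audio by 5 * 2**6 = 320, i.e. one output frame per 320 input samples
# (20 ms of audio at a 16 kHz sampling rate).
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320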
713
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the shift-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:  # lowest bit of b set -> add the current power-of-two multiple of a
            res += a
        a += a  # double a
        b >>= 1  # halve b
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply a by b modulo `modulus`, keeping intermediate sums reduced."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
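# Quick sanity check for the two helpers above: both agree with the
# built-in operators, since the loop is just binary long multiplication.
assert binary_multiply(12, 34) == 12 * 34
assert binary_mod_multiply(12, 34, 7) == (12 * 34) % 7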
70
0
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCAmelCase : '''simple docstring''' lowerCamelCase_ = LEDConfig lowerCamelCase_ = {} lowerCamelCase_ = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=4 , ): """simple docstring""" A_ : Union[str, Any] = parent A_ : Optional[int] = batch_size A_ : int = seq_length A_ : str = is_training A_ : Any = use_labels A_ : Union[str, Any] = vocab_size A_ : Dict = hidden_size A_ : Tuple = num_hidden_layers A_ : Optional[int] = num_attention_heads A_ : Dict = intermediate_size A_ : Optional[Any] = hidden_dropout_prob A_ : Tuple = attention_probs_dropout_prob A_ : List[Any] = max_position_embeddings A_ : int = eos_token_id A_ : Dict = pad_token_id A_ : Optional[Any] = bos_token_id A_ : int = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after A_ : str = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests A_ : str = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A_ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) A_ : int = prepare_led_inputs_dict(lowercase , lowercase , lowercase ) A_ : Tuple = tf.concat( [tf.zeros_like(lowercase )[:, :-1], tf.ones_like(lowercase )[:, -1:]] , axis=-1 , ) A_ : List[Any] = global_attention_mask return config, inputs_dict def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : str = TFLEDModel(config=lowercase ).get_decoder() A_ : Optional[int] = inputs_dict['input_ids'] A_ : Dict = input_ids[:1, :] A_ : Tuple = inputs_dict['attention_mask'][:1, :] A_ : int = 1 # first forward pass A_ : str = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) A_ : Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A_ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : str = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and A_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 ) A_ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A_ : Dict = model(lowercase , attention_mask=lowercase )[0] A_ : str = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A_ : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A_ : Dict = output_from_no_past[:, -3:, random_slice_idx] A_ : str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) def UpperCamelCase ( __lowercase : Optional[Any] ,__lowercase : str ,__lowercase : List[str] ,__lowercase : str=None ,__lowercase : Tuple=None ,__lowercase : List[Any]=None ,__lowercase : Tuple=None ,): '''simple docstring''' if attention_mask is None: A_ : Dict = tf.cast(tf.math.not_equal(__lowercase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: A_ : int = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: A_ : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: A_ : List[Any] = 
tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () lowerCamelCase_ = (TFLEDForConditionalGeneration,) if is_tf_available() else () lowerCamelCase_ = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase_ = True lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = TFLEDModelTester(self ) A_ : str = ConfigTester(self , config_class=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = tf.zeros_like(inputs_dict['attention_mask'] ) A_ : Tuple = 2 A_ : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , ) A_ : List[str] = True A_ : Dict = self.model_tester.seq_length A_ : str = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase ): A_ : Dict = outputs.decoder_attentions self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase ): A_ : Optional[int] = [t.numpy() for t in outputs.encoder_attentions] A_ : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: A_ : List[str] = True A_ : Union[str, Any] = False A_ : List[Any] = False A_ : Any = model_class(lowercase ) A_ : Any = model(self._prepare_for_class(lowercase , lowercase ) ) A_ : Any = len(lowercase ) self.assertEqual(config.output_hidden_states , lowercase ) check_encoder_attentions_output(lowercase ) if self.is_encoder_decoder: A_ : Dict = model_class(lowercase ) A_ : int = model(self._prepare_for_class(lowercase , lowercase ) ) self.assertEqual(config.output_hidden_states , lowercase ) check_decoder_attentions_output(lowercase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] A_ : Tuple = True A_ : Tuple = model_class(lowercase ) A_ : Tuple = model(self._prepare_for_class(lowercase , lowercase ) ) 
self.assertEqual(config.output_hidden_states , lowercase ) check_encoder_attentions_output(lowercase ) # Check attention is always last and order is fine A_ : List[str] = True A_ : Optional[int] = True A_ : str = model_class(lowercase ) A_ : int = model(self._prepare_for_class(lowercase , lowercase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase ) ) self.assertEqual(model.config.output_hidden_states , lowercase ) check_encoder_attentions_output(lowercase ) @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" pass def UpperCamelCase ( __lowercase : Union[str, Any] ): '''simple docstring''' return tf.constant(__lowercase ,dtype=tf.intaa ) _UpperCAmelCase = 1e-4 @slow @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led # change to intended input here A_ : Any = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A_ : int = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A_ : List[str] = prepare_led_inputs_dict(model.config , lowercase , lowercase ) A_ : str = model(**lowercase )[0] A_ : Union[str, Any] = (1, 1_0_2_4, 7_6_8) self.assertEqual(output.shape , lowercase ) # change to expected output here A_ : int = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ) # change to intended input here A_ : Union[str, Any] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A_ : List[str] = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) A_ : List[Any] = prepare_led_inputs_dict(model.config , lowercase , lowercase ) A_ : List[str] = model(**lowercase )[0] A_ : Dict = (1, 1_0_2_4, model.config.vocab_size) self.assertEqual(output.shape , lowercase ) # change to expected output here A_ : Any = tf.convert_to_tensor( [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase , atol=1E-3 , rtol=1E-3 )
714
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers h(n) = n * (2n - 1)."""
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
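# Cross-check of the closed form used above: consecutive hexagonal numbers
# differ by h(n + 1) - h(n) = 4n + 1, so the same list can be built by
# accumulating that arithmetic progression.
def hexagonal_numbers_by_differences(length: int) -> list[int]:
    out, value = [], 0
    for n in range(length):
        out.append(value)
        value += 4 * n + 1
    return out


assert hexagonal_numbers_by_differences(10) == hexagonal_numbers(length=10)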
70
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
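# Usage note (illustrative, not part of the module): the _LazyModule
# indirection keeps `import transformers.models.m2m_100` cheap; the heavy
# torch-backed symbols are only imported on first attribute access, e.g.
#
#   from transformers.models.m2m_100 import M2M100Config  # config only
#   from transformers.models.m2m_100 import M2M100Model   # pulls in the modeling code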
715
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    """Prim's algorithm should recover the known MST of a small test graph."""
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
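# Hypothetical sketch of what the imported `prisms_algorithm` could look
# like for the adjacency-list format built above (the real implementation
# lives in graphs.minimum_spanning_tree_prims): heap-based Prim's, returning
# the MST as a set of (node1, node2) pairs so the membership checks in the
# test hold.
import heapq


def prim_mst_sketch(adjacency, start=0):
    visited = {start}
    # Heap of candidate edges leaving the visited set: (cost, from_node, to_node).
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = set()
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in visited:
            continue
        visited.add(to)
        mst_edges.add((frm, to))
        for nxt, nxt_cost in adjacency[to]:
            if nxt not in visited:
                heapq.heappush(heap, (nxt_cost, to, nxt))
    return mst_edges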
70
0
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
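# Illustrative end-to-end use of the packaged builder above (assuming it is
# registered as the "pandas" loader inside `datasets`): the loader reads
# pickled DataFrames with pd.read_pickle, one Arrow table per file.
import pandas as pd
from datasets import load_dataset

pd.DataFrame({"text": ["a", "b"], "label": [0, 1]}).to_pickle("train.pkl")
dataset = load_dataset("pandas", data_files={"train": "train.pkl"})["train"]
print(dataset[0])  # {'text': 'a', 'label': 0}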
716
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
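# Hypothetical shape of one of the imported *_command_parser factories
# (the real implementations live in accelerate.commands.*): each registers
# a subcommand and binds a `func` default via set_defaults, which is what
# makes the `args.func(args)` dispatch in main() above work.
import argparse


def example_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("example", help="Run the example command")
    else:
        parser = argparse.ArgumentParser("example")
    parser.add_argument("--config_file", default=None, help="Optional config path")
    if subparsers is not None:
        parser.set_defaults(func=example_command)
    return parser


def example_command(args):
    print(f"example command ran with config_file={args.config_file}")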
70
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
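# Minimal usage sketch for the class above: a valid `rope_scaling` dict must
# carry exactly the `type` and `factor` fields that _rope_scaling_validation
# checks at construction time.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] > 1.0

# An invalid factor (<= 1.0) fails validation immediately.
try:
    GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 1.0})
except ValueError as err:
    print(err)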
717
from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = DistilBertTokenizer lowerCamelCase_ = DistilBertTokenizerFast lowerCamelCase_ = True @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' ) A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase ) A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase ) A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase ) A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
70
0
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase=1_3 , lowercase=3_0 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=1_0 , lowercase=0.02 , lowercase=3 , lowercase=None , lowercase=2 , ): """simple docstring""" A_ : List[str] = parent A_ : int = batch_size A_ : Optional[int] = image_size A_ : Optional[Any] = patch_size A_ : Optional[int] = num_channels A_ : Dict = is_training A_ : List[Any] = use_labels A_ : List[Any] = hidden_size A_ : Optional[int] = num_hidden_layers A_ : int = num_attention_heads A_ : Tuple = intermediate_size A_ : Union[str, Any] = hidden_act A_ : Any = hidden_dropout_prob A_ : Dict = attention_probs_dropout_prob A_ : Tuple = type_sequence_label_size A_ : Dict = initializer_range A_ : Optional[int] = scope A_ : Union[str, Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A_ : Tuple = (image_size // patch_size) ** 2 A_ : Union[str, Any] = num_patches + 2 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : str = None if self.use_labels: A_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : List[Any] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self ): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Tuple = TFDeiTModel(config=lowercase ) A_ : Union[str, Any] = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : List[Any] = TFDeiTForMaskedImageModeling(config=lowercase ) A_ : str = model(lowercase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A_ : str = 1 
A_ : str = TFDeiTForMaskedImageModeling(lowercase ) A_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : Any = model(lowercase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Optional[int] = self.type_sequence_label_size A_ : Dict = TFDeiTForImageClassification(lowercase ) A_ : Tuple = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A_ : List[str] = 1 A_ : Union[str, Any] = TFDeiTForImageClassification(lowercase ) A_ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A_ : List[Any] = model(lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.prepare_config_and_inputs() A_ : Any = config_and_inputs A_ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) lowerCamelCase_ = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFDeiTModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=3_7 ) def lowerCAmelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Union[str, Any] = model_class(lowercase ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A_ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase , tf.keras.layers.Dense ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : List[Any] = model_class(lowercase ) A_ : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : List[Any] = [*signature.parameters.keys()] A_ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) def 
lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=False ): """simple docstring""" A_ : List[str] = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[int] = TFDeiTModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def UpperCamelCase ( ): '''simple docstring''' A_ : Any = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224' ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224' ) A_ : Union[str, Any] = self.default_image_processor A_ : List[Any] = prepare_img() A_ : Any = image_processor(images=lowercase , return_tensors='tf' ) # forward pass A_ : Optional[Any] = model(**lowercase ) # verify the logits A_ : str = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase ) A_ : Tuple = tf.constant([-1.0266, 0.1912, -1.2861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
718
import random


def rabin_miller(num: int) -> bool:
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    if num < 2:
        return False
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
70
0
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
719
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
70
0
from __future__ import annotations


def is_9_pandigital(n: int) -> bool:
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 10_0002 * base_num
        if is_9_pandigital(candidate):
            return candidate

    for base_num in range(333, 99, -1):
        candidate = 1_00_2003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
720
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = FlaxAutoencoderKL @property def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = 4 A_ : int = 3 A_ : List[str] = (3_2, 3_2) A_ : Any = jax.random.PRNGKey(0 ) A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } A_ : int = self.dummy_input return init_dict, inputs_dict
70
0
from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCAmelCase ( __A , __A , __A ): '''simple docstring''' lowerCamelCase_ = [R'''h\.\d+\.attn\.bias''', R'''h\.\d+\.attn\.masked_bias'''] @register_to_config def __init__( self , lowercase , lowercase , lowercase = None , lowercase = 5_0_2_5_7 , lowercase = 1_0_2_4 , lowercase = 7_6_8 , lowercase = 1_2 , lowercase = 1_2 , lowercase = None , lowercase = "gelu_new" , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 1E-5 , lowercase = 0.02 , lowercase = True , lowercase = True , lowercase = False , lowercase = False , ): """simple docstring""" super().__init__() A_ : Tuple = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and''' F''' `n_embd`: {n_embd} are not equal.''' ) A_ : Optional[Any] = prefix_inner_dim A_ : Any = prefix_hidden_dim A_ : Any = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : List[Any] = ( nn.Linear(self.prefix_hidden_dim , lowercase ) if self.prefix_hidden_dim is not None else nn.Identity() ) A_ : str = GPTaConfig( vocab_size=lowercase , n_positions=lowercase , n_embd=lowercase , n_layer=lowercase , n_head=lowercase , n_inner=lowercase , activation_function=lowercase , resid_pdrop=lowercase , embd_pdrop=lowercase , attn_pdrop=lowercase , layer_norm_epsilon=lowercase , initializer_range=lowercase , scale_attn_weights=lowercase , use_cache=lowercase , scale_attn_by_inverse_layer_idx=lowercase , reorder_and_upcast_attn=lowercase , ) A_ : List[Any] = GPTaLMHeadModel(lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = None , lowercase = None , ): """simple docstring""" A_ : Tuple = self.transformer.transformer.wte(lowercase ) A_ : Optional[int] = self.encode_prefix(lowercase ) A_ : str = self.decode_prefix(lowercase ) A_ : Tuple = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: A_ : Tuple = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) A_ : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) A_ : List[str] = self.transformer(inputs_embeds=lowercase , labels=lowercase , attention_mask=lowercase ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return torch.zeros(lowercase , self.prefix_length , dtype=torch.intaa , device=lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return self.encode_prefix(lowercase ) @torch.no_grad() def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Any = torch.split(lowercase , 1 , dim=0 ) A_ : Union[str, Any] = [] A_ : Optional[Any] = [] for feature in features: A_ : Optional[int] = self.decode_prefix(feature.to(lowercase ) ) # back to the clip feature # Only support beam search for now A_ : Dict = self.generate_beam( input_embeds=lowercase , device=lowercase , eos_token_id=lowercase ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) A_ : Any = torch.stack(lowercase ) A_ : int = torch.stack(lowercase ) return generated_tokens, generated_seq_lengths 
@torch.no_grad() def lowerCAmelCase_ ( self , lowercase=None , lowercase=None , lowercase=None , lowercase = 5 , lowercase = 6_7 , lowercase = 1.0 , lowercase = None , ): """simple docstring""" A_ : Optional[Any] = eos_token_id A_ : Optional[Any] = None A_ : Dict = None A_ : Union[str, Any] = torch.ones(lowercase , device=lowercase , dtype=torch.int ) A_ : int = torch.zeros(lowercase , device=lowercase , dtype=torch.bool ) if input_embeds is not None: A_ : Optional[Any] = input_embeds else: A_ : Dict = self.transformer.transformer.wte(lowercase ) for i in range(lowercase ): A_ : Dict = self.transformer(inputs_embeds=lowercase ) A_ : Optional[Any] = outputs.logits A_ : List[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) A_ : List[str] = logits.softmax(-1 ).log() if scores is None: A_ : int = logits.topk(lowercase , -1 ) A_ : Any = generated.expand(lowercase , *generated.shape[1:] ) A_ : Tuple = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: A_ : Tuple = next_tokens else: A_ : int = tokens.expand(lowercase , *tokens.shape[1:] ) A_ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 ) else: A_ : Tuple = -float(np.inf ) A_ : int = 0 A_ : Any = scores[:, None] + logits seq_lengths[~is_stopped] += 1 A_ : Union[str, Any] = scores_sum / seq_lengths[:, None] A_ : List[Any] = scores_sum_average.view(-1 ).topk(lowercase , -1 ) A_ : Optional[Any] = next_tokens // scores_sum.shape[1] A_ : str = seq_lengths[next_tokens_source] A_ : List[str] = next_tokens % scores_sum.shape[1] A_ : Dict = next_tokens.unsqueeze(1 ) A_ : Dict = tokens[next_tokens_source] A_ : int = torch.cat((tokens, next_tokens) , dim=1 ) A_ : Dict = generated[next_tokens_source] A_ : Dict = scores_sum_average * seq_lengths A_ : str = is_stopped[next_tokens_source] A_ : Optional[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) A_ : Dict = torch.cat((generated, next_token_embed) , dim=1 ) A_ : Optional[int] = is_stopped + next_tokens.eq(lowercase ).squeeze() if is_stopped.all(): break A_ : List[str] = scores / seq_lengths A_ : List[str] = scores.argsort(descending=lowercase ) # tokens tensors are already padded to max_seq_length A_ : Union[str, Any] = [tokens[i] for i in order] A_ : Union[str, Any] = torch.stack(lowercase , dim=0 ) A_ : Any = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
721
import numpy as np

SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class PolybiusCipher:
    def __init__(self):
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str):
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
70
0
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
700
from math import sqrt


def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
70
0
import os


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
701
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ): """simple docstring""" super().__init__() A_ : Tuple = initial_learning_rate A_ : List[str] = warmup_steps A_ : int = power A_ : Dict = decay_schedule_fn A_ : Any = name def __call__( self , lowercase ): """simple docstring""" with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. A_ : Optional[int] = tf.cast(lowercase , tf.floataa ) A_ : int = tf.cast(self.warmup_steps , tf.floataa ) A_ : Optional[int] = global_step_float / warmup_steps_float A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,): '''simple docstring''' A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,) if num_warmup_steps: A_ : Tuple = WarmUp( initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,) if weight_decay_rate > 0.0: A_ : Union[str, Any] = AdamWeightDecay( learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,) else: A_ : Dict = tf.keras.optimizers.Adam( learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ): """simple docstring""" super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase ) A_ : Dict = weight_decay_rate A_ : Union[str, Any] = include_in_weight_decay A_ : str = exclude_from_weight_decay @classmethod def lowerCAmelCase_ ( cls , lowercase ): """simple docstring""" A_ : Tuple = {'WarmUp': WarmUp} return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase ) A_ : Optional[Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Dict = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ , A_ : Optional[int] = list(zip(*lowercase ) ) return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} A_ : List[str] = apply_state or {} A_ : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: A_ : Dict = self._fallback_apply_state(lowercase , lowercase ) A_ : int = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase , lowercase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase , lowercase ) is not None: return False return True class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self ): """simple docstring""" A_ : int = [] A_ : Optional[int] = None @property def lowerCAmelCase_ ( self ): """simple 
docstring""" if self._accum_steps is None: A_ : int = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , lowercase ): """simple docstring""" if not self._gradients: A_ : Optional[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' ) for accum_gradient, gradient in zip(self._gradients , lowercase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase ) self._accum_steps.assign_add(1 ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase ) )
70
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger(__name__) def UpperCamelCase ( __lowercase : List[str] ): '''simple docstring''' A_ : int = 'huggingface/label-files' A_ : Tuple = 'imagenet-1k-id2label.json' A_ : Dict = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) ) A_ : Dict = {int(__lowercase ): v for k, v in idalabel.items()} A_ : Union[str, Any] = {v: k for k, v in idalabel.items()} A_ : Dict = 'std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" A_ : Union[str, Any] = BitConfig( conv_layer=__lowercase ,num_labels=10_00 ,idalabel=__lowercase ,labelaid=__lowercase ,) return config def UpperCamelCase ( __lowercase : Dict ): '''simple docstring''' if "stem.conv" in name: A_ : str = name.replace('stem.conv' ,'bit.embedder.convolution' ) if "blocks" in name: A_ : Optional[Any] = name.replace('blocks' ,'layers' ) if "head.fc" in name: A_ : Any = name.replace('head.fc' ,'classifier.1' ) if name.startswith('norm' ): A_ : Tuple = 'bit.' + name if "bit" not in name and "classifier" not in name: A_ : List[Any] = 'bit.encoder.' + name return name def UpperCamelCase ( ): '''simple docstring''' A_ : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg' A_ : str = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw ) return im @torch.no_grad() def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : str ,__lowercase : List[str]=False ): '''simple docstring''' A_ : Any = get_config(__lowercase ) # load original model from timm A_ : List[str] = create_model(__lowercase ,pretrained=__lowercase ) timm_model.eval() # load state_dict of original model A_ : List[str] = timm_model.state_dict() for key in state_dict.copy().keys(): A_ : List[str] = state_dict.pop(__lowercase ) A_ : Dict = val.squeeze() if 'head' in key else val # load HuggingFace model A_ : Union[str, Any] = BitForImageClassification(__lowercase ) model.eval() model.load_state_dict(__lowercase ) # create image processor A_ : List[str] = create_transform(**resolve_data_config({} ,model=__lowercase ) ) A_ : Dict = transform.transforms A_ : Tuple = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } A_ : List[Any] = BitImageProcessor( do_resize=__lowercase ,size={'shortest_edge': timm_transforms[0].size} ,resample=pillow_resamplings[timm_transforms[0].interpolation.value] ,do_center_crop=__lowercase ,crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} ,do_normalize=__lowercase ,image_mean=timm_transforms[-1].mean.tolist() ,image_std=timm_transforms[-1].std.tolist() ,) A_ : Tuple = prepare_img() A_ : List[Any] = transform(__lowercase ).unsqueeze(0 ) A_ : str = processor(__lowercase ,return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(__lowercase ,__lowercase ) # verify logits with torch.no_grad(): 
A_ : Optional[Any] = model(__lowercase ) A_ : Optional[int] = outputs.logits print('Logits:' ,logits[0, :3] ) print('Predicted class:' ,model.config.idalabel[logits.argmax(-1 ).item()] ) A_ : Optional[int] = timm_model(__lowercase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__lowercase ,outputs.logits ,atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(__lowercase ).mkdir(exist_ok=__lowercase ) print(f'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(__lowercase ) processor.save_pretrained(__lowercase ) if push_to_hub: print(f'''Pushing model {model_name} and processor to the hub''' ) model.push_to_hub(f'''ybelkada/{model_name}''' ) processor.push_to_hub(f'''ybelkada/{model_name}''' ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""resnetv2_50x1_bitm""", type=str, help="""Name of the BiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub.""", ) _UpperCAmelCase = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
702
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : Any = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Tuple = 
AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[str] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) 
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
70
0
import functools


def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
703
def find_min(arr: list) -> int:
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i][j - 1]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
70
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
704
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) A_ : List[Any] = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) ) self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) A_ : str = get_activation('gelu' ) A_ : int = get_activation('gelu_10' ) A_ : Optional[int] = torch_builtin(lowercase ) A_ : Tuple = geluaa(lowercase ) A_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(lowercase ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase_ ( self ): """simple docstring""" get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(lowercase ): get_activation('bogus' ) with self.assertRaises(lowercase ): get_activation(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = get_activation('gelu' ) A_ : List[str] = 1 A_ : Optional[Any] = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(lowercase ): A_ : str = acta.a
70
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]


if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
705
from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCAmelCase = logging.get_logger(__name__) # General docstring _UpperCAmelCase = """RegNetConfig""" # Base docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = [1, 1088, 7, 7] # Image classification docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = """tabby, tabby cat""" _UpperCAmelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , ) A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[str] = self.convolution(self.padding(lowercase ) ) A_ : List[str] = self.normalization(lowercase ) A_ : List[Any] = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[int] = config.num_channels A_ : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = shape_list(lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) ) A_ : Optional[int] = self.embedder(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' ) A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" return self.normalization(self.convolution(lowercase ) , training=lowercase ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) A_ : Optional[Any] = [ tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = self.pooler(lowercase ) for layer_module in self.attention: A_ : Optional[Any] = layer_module(lowercase ) A_ : Optional[int] = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : Optional[int] = max(1 , out_channels // config.groups_width ) A_ : List[Any] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
A_ : Optional[int] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ), ] A_ : List[str] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = hidden_state for layer_module in self.layers: A_ : int = layer_module(lowercase ) A_ : Union[str, Any] = self.shortcut(lowercase ) hidden_state += residual A_ : Dict = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : int = max(1 , out_channels // config.groups_width ) A_ : Optional[int] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) A_ : List[str] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ), ] A_ : Union[str, Any] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = hidden_state for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) A_ : int = self.shortcut(lowercase ) hidden_state += residual A_ : str = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer A_ : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ), *[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : List[str] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) ) def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ): """simple docstring""" A_ : Tuple = () if 
output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A_ : Dict = hidden_states + (hidden_state,) A_ : List[Any] = stage_module(lowercase ) if output_hidden_states: A_ : Union[str, Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' lowerCamelCase_ = RegNetConfig def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[Any] = config A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' ) A_ : str = TFRegNetEncoder(lowercase , name='encoder' ) A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" A_ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase ) A_ : Optional[int] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Dict = encoder_outputs[0] A_ : List[Any] = self.pooler(lowercase ) # Change to NCHW output format have uniformity in the modules A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = RegNetConfig lowerCamelCase_ = '''regnet''' lowerCamelCase_ = '''pixel_values''' @property def lowerCAmelCase_ ( self ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _UpperCAmelCase = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : int = TFRegNetMainLayer(lowercase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : Tuple = self.regnet( pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , __A , ) class UpperCAmelCase ( __A , __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : List[Any] = config.num_labels A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' ) # classification head A_ : Union[str, Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : List[Any] = self.regnet( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] A_ : List[Any] = self.classifier[0](lowercase ) A_ : Union[str, Any] = self.classifier[1](lowercase ) A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase ) if not return_dict: A_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
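A minimal inference sketch for the classification model defined above, assuming the `facebook/regnet-y-040` checkpoint referenced in the docstrings. The random tensor stands in for a real preprocessed image.

import tensorflow as tf
from transformers import TFRegNetForImageClassification

model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
# NCHW layout, matching the serving signature above: (batch, channels, 224, 224).
pixel_values = tf.random.uniform((1, 3, 224, 224))
logits = model(pixel_values).logits
predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[predicted_class])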
70
0
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
706
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# The torch-only exports below must be registered in this dict so the
# `_LazyModule` call at the bottom can resolve them.
_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
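A hedged generation sketch using the exports above. The `microsoft/biogpt` checkpoint id is an assumption; the API calls themselves are standard transformers usage.

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_length=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))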
70
0
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place with the odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between odd and even index pairs on each pass.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
707
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place with the odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between odd and even index pairs on each pass.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
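A quick property check against Python's built-in sort (illustrative only):

import random

data = [random.randint(-100, 100) for _ in range(25)]
# The transposition sort must agree with sorted() on any input.
assert odd_even_transposition(list(data)) == sorted(data)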
70
0
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
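A minimal text-to-image sketch with the pipeline exported above, assuming the `microsoft/vq-diffusion-ithq` checkpoint and a CUDA device are available:

import torch
from diffusers import VQDiffusionPipeline

pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("teddy bear playing in the pool").images[0]
image.save("teddy_bear.png")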
708
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''wavlm''' def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(1_0, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=3_2_0 , lowercase=8_0_0 , lowercase=False , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=3_2_0 , lowercase=2 , lowercase=0.1 , lowercase=1_0_0 , lowercase=2_5_6 , lowercase=2_5_6 , lowercase=0.1 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=5_1_2 , lowercase=8_0 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ): """simple docstring""" super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) A_ : List[Any] = hidden_size A_ : Tuple = feat_extract_norm A_ : Dict = feat_extract_activation A_ : Optional[Any] = list(lowercase ) A_ : Union[str, Any] = list(lowercase ) A_ : List[str] = list(lowercase ) A_ : str = conv_bias A_ : Tuple = num_buckets A_ : Union[str, Any] = max_bucket_distance A_ : int = num_conv_pos_embeddings A_ : str = num_conv_pos_embedding_groups A_ : str = len(self.conv_dim ) A_ : Tuple = num_hidden_layers A_ : Tuple = intermediate_size A_ : Optional[Any] = hidden_act A_ : Optional[Any] = num_attention_heads A_ : str = hidden_dropout A_ : Optional[int] = attention_dropout A_ : Optional[Any] = activation_dropout A_ : Optional[int] = feat_proj_dropout A_ : List[Any] = final_dropout A_ : Union[str, Any] = layerdrop A_ : Dict = layer_norm_eps A_ : Optional[Any] = initializer_range A_ : str = num_ctc_classes A_ : Any = vocab_size A_ : str = do_stable_layer_norm A_ : int = use_weighted_layer_sum A_ : int = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : List[str] = apply_spec_augment A_ : Optional[Any] = mask_time_prob A_ : int = mask_time_length A_ : Any = mask_time_min_masks A_ : Optional[int] = mask_feature_prob A_ : Tuple = mask_feature_length # parameters for pretraining with codevector quantized representations A_ : int = num_codevectors_per_group A_ : Any = num_codevector_groups A_ : List[Any] = contrastive_logits_temperature A_ : Optional[Any] = num_negatives A_ : Optional[Any] = codevector_dim A_ : int = proj_codevector_dim A_ : int = diversity_loss_weight # ctc loss A_ : Union[str, Any] = ctc_loss_reduction A_ : Any = ctc_zero_infinity # adapter A_ : int = add_adapter A_ : Optional[Any] = adapter_kernel_size A_ : Optional[int] = adapter_stride A_ : Dict = num_adapter_layers A_ : str = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. A_ : int = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. A_ : Tuple = list(lowercase ) A_ : Optional[Any] = list(lowercase ) A_ : Dict = list(lowercase ) A_ : Dict = xvector_output_dim @property def lowerCAmelCase_ ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
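The final property above multiplies the convolutional strides together, which is the waveform-to-frame downsampling factor of the feature extractor. A small sketch with the default strides (in upstream transformers this property is named `inputs_to_logits_ratio`):

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # default strides from the signature above
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320 -> one output frame per 320 input samples (20 ms at 16 kHz)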
70
0
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
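Example usage — "karolin" and "kathrin" differ in exactly three positions:

print(hamming_distance("karolin", "kathrin"))  # 3
print(hamming_distance("python", "python"))    # 0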
709
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase = logging.get_logger() def UpperCamelCase ( __lowercase : int ,__lowercase : str ,__lowercase : LevitConfig ,__lowercase : Path ,__lowercase : bool = True ): '''simple docstring''' print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": A_ : int = timm.create_model('levit_128s' ,pretrained=__lowercase ) else: A_ : str = timm.create_model('levit_128' ,pretrained=__lowercase ) if hidden_sizes == 1_92: A_ : List[str] = timm.create_model('levit_192' ,pretrained=__lowercase ) if hidden_sizes == 2_56: A_ : Optional[Any] = timm.create_model('levit_256' ,pretrained=__lowercase ) if hidden_sizes == 3_84: A_ : Tuple = timm.create_model('levit_384' ,pretrained=__lowercase ) from_model.eval() A_ : Dict = LevitForImageClassificationWithTeacher(__lowercase ).eval() A_ : Union[str, Any] = OrderedDict() A_ : Dict = from_model.state_dict() A_ : Tuple = list(from_model.state_dict().keys() ) A_ : str = list(our_model.state_dict().keys() ) print(len(__lowercase ) ,len(__lowercase ) ) for i in range(len(__lowercase ) ): A_ : str = weights[og_keys[i]] our_model.load_state_dict(__lowercase ) A_ : str = torch.randn((2, 3, 2_24, 2_24) ) A_ : str = from_model(__lowercase ) A_ : Optional[Any] = our_model(__lowercase ).logits assert torch.allclose(__lowercase ,__lowercase ), "The model logits don't match the original one." A_ : List[str] = name print(__lowercase ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) A_ : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def UpperCamelCase ( __lowercase : Path ,__lowercase : str = None ,__lowercase : bool = True ): '''simple docstring''' A_ : Dict = 'imagenet-1k-id2label.json' A_ : Optional[int] = 10_00 A_ : Optional[int] = (1, num_labels) A_ : int = 'huggingface/label-files' A_ : int = num_labels A_ : Union[str, Any] = json.load(open(hf_hub_download(__lowercase ,__lowercase ,repo_type='dataset' ) ,'r' ) ) A_ : int = {int(__lowercase ): v for k, v in idalabel.items()} A_ : List[str] = idalabel A_ : str = {v: k for k, v in idalabel.items()} A_ : int = partial(__lowercase ,num_labels=__lowercase ,idalabel=__lowercase ,labelaid=__lowercase ) A_ : Any = { 'levit-128S': 1_28, 'levit-128': 1_28, 'levit-192': 1_92, 'levit-256': 2_56, 'levit-384': 3_84, } A_ : Tuple = { 'levit-128S': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 6, 8] ,depths=[2, 3, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-128': ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] ,num_attention_heads=[4, 8, 12] ,depths=[4, 4, 4] ,key_dim=[16, 16, 16] ,drop_path_rate=0 ,), 'levit-192': ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] ,num_attention_heads=[3, 5, 6] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-256': ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] ,num_attention_heads=[4, 6, 8] ,depths=[4, 4, 4] ,key_dim=[32, 32, 32] ,drop_path_rate=0 ,), 'levit-384': ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] ,num_attention_heads=[6, 9, 12] ,depths=[4, 4, 4] ,key_dim=[32, 
32, 32] ,drop_path_rate=0.1 ,), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] ,__lowercase ,names_to_config[model_name] ,__lowercase ,__lowercase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] ,__lowercase ,__lowercase ,__lowercase ,__lowercase ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
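The flags match the argparse definitions above; the script filename here is hypothetical. A typical invocation would be `python convert_levit_timm_to_pytorch.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/`, with `--push_to_hub` or `--no-push_to_hub` toggling whether the converted checkpoint is saved for upload.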
70
0
710
def text_justification(word: str, max_width: int) -> list:
    """Greedily pack words into lines of ``max_width`` and fully justify them."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if words_count == 1:
            # Only one word on the line: pad it on the right.
            return line[0] + " " * overall_spaces_count
        spaces_to_insert_between_words = words_count - 1
        # num_spaces_between_words_list[i] spaces go after line[i].
        num_spaces_between_words_list = spaces_to_insert_between_words * [
            overall_spaces_count // spaces_to_insert_between_words
        ]
        spaces_count_in_locations = overall_spaces_count % spaces_to_insert_between_words
        # Distribute the remainder round-robin to the leftmost gaps.
        for i in range(spaces_count_in_locations):
            num_spaces_between_words_list[i] += 1
        aligned_words_list = []
        for i in range(spaces_to_insert_between_words):
            aligned_words_list.append(line[i])
            aligned_words_list.append(num_spaces_between_words_list[i] * " ")
        aligned_words_list.append(line[-1])
        return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # width = total length of words already on the line;
            # len(line) = minimum number of separating spaces needed so far.
            line.append(inner_word)
            width += len(inner_word)
        else:
            # Justify the finished line, then start a new one with this word.
            answer.append(justify(line, width, max_width))
            line, width = [inner_word], len(inner_word)
    # The last line is left-justified and padded with trailing spaces.
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
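A worked example with max_width = 16, the classic justification case:

for row in text_justification("This is an example of text justification.", 16):
    print(repr(row))
# 'This    is    an'
# 'example  of text'
# 'justification.  '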
70
0
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def UpperCamelCase ( __lowercase : List[Any] ,__lowercase : Union[str, Any] ,__lowercase : Optional[int]=None ,__lowercase : Optional[Any]=None ): '''simple docstring''' if attention_mask is None: A_ : Dict = tf.cast(tf.math.not_equal(__lowercase ,config.pad_token_id ) ,tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class UpperCAmelCase : '''simple docstring''' lowerCamelCase_ = OPTConfig lowerCamelCase_ = {} lowerCamelCase_ = '''gelu''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=False , lowercase=9_9 , lowercase=1_6 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=2_0 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=1_6 , lowercase=1_6 , ): """simple docstring""" A_ : Tuple = parent A_ : Optional[Any] = batch_size A_ : Any = seq_length A_ : Optional[int] = is_training A_ : Optional[int] = use_labels A_ : Any = vocab_size A_ : Union[str, Any] = hidden_size A_ : Dict = num_hidden_layers A_ : Tuple = num_attention_heads A_ : Tuple = intermediate_size A_ : Tuple = hidden_act A_ : Optional[int] = hidden_dropout_prob A_ : int = attention_probs_dropout_prob A_ : Any = max_position_embeddings A_ : str = eos_token_id A_ : List[str] = pad_token_id A_ : Optional[int] = bos_token_id A_ : Any = embed_dim A_ : Optional[Any] = word_embed_proj_dim A_ : Optional[Any] = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) A_ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) A_ : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 ) A_ : Tuple = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , ) A_ : Union[str, Any] = prepare_opt_inputs_dict(lowercase , lowercase ) return config, inputs_dict def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : Optional[Any] = TFOPTModel(config=lowercase ) A_ : List[Any] = inputs_dict['input_ids'] A_ : Optional[int] = input_ids[:1, :] A_ : Optional[Any] = inputs_dict['attention_mask'][:1, :] A_ : List[str] = 1 # first forward pass A_ : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase ) A_ : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids A_ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids 
and A_ : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 ) A_ : Tuple = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) A_ : Tuple = model(lowercase , attention_mask=lowercase )[0] A_ : Any = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice A_ : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) A_ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] A_ : int = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 ) @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () lowerCamelCase_ = (TFOPTForCausalLM,) if is_tf_available() else () lowerCamelCase_ = ( {'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = 1_0 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = TFOPTModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase , lowercase ): if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase , 'weight' ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]: # build the embeddings A_ : Dict = model_class(config=lowercase ) A_ : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) A_ : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase ) A_ : Dict = _get_word_embedding_weight(lowercase , model.get_input_embeddings() ) A_ : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. 
A_ : int = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase ) # check that weights remain the same after resizing A_ : Any = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: A_ : int = False self.assertTrue(lowercase ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase ) A_ : Tuple = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: A_ : Union[str, Any] = False self.assertTrue(lowercase ) def UpperCamelCase ( __lowercase : Any ): '''simple docstring''' return tf.constant(__lowercase ,dtype=tf.intaa ) @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = 9_9 def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = tf.ones((4, 1) , dtype=tf.intaa ) * 2 A_ : Optional[Any] = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) A_ : Dict = input_ids.shape[0] A_ : Union[str, Any] = OPTConfig( vocab_size=self.vocab_size , hidden_size=2_4 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFOPTModel.from_pretrained('facebook/opt-350m' ) A_ : Optional[int] = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) A_ : Any = tf.not_equal(lowercase , model.config.pad_token_id ) with tf.GradientTape(): A_ : Dict = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state A_ : List[Any] = (1, 1_1, 5_1_2) self.assertEqual(output.shape , lowercase ) A_ : List[Any] = tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) ) A_ : Dict = tf.function(lowercase , jit_compile=lowercase ) A_ : Optional[int] = xla_generate(lowercase , lowercase )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) ) @require_tf @slow class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" super().setUp() A_ : List[Any] = 'facebook/opt-350m' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = TFOPTForCausalLM.from_pretrained(self.path_model ) A_ : Optional[Any] = GPTaTokenizer.from_pretrained(self.path_model ) A_ : Any = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False A_ : Optional[int] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase ) A_ : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) A_ : Optional[Any] = tf.constant( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, 
-0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) A_ : Union[str, Any] = tf.function(lowercase , jit_compile=lowercase ) A_ : Any = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) ) @require_tf @slow class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @property def lowerCAmelCase_ ( self ): """simple docstring""" return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = 'facebook/opt-125m' A_ : Optional[int] = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] A_ : Dict = [] A_ : List[Any] = GPTaTokenizer.from_pretrained(lowercase ) A_ : Tuple = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: A_ : Dict = tokenizer(lowercase , return_tensors='tf' ).input_ids A_ : List[Any] = model.generate(lowercase , max_length=1_0 ) A_ : Optional[int] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = 'facebook/opt-350m' A_ : Optional[int] = GPTaTokenizer.from_pretrained(lowercase ) A_ : Tuple = TFOPTForCausalLM.from_pretrained(lowercase ) A_ : Tuple = 'left' # use different length sentences to test batching A_ : Any = [ 'Hello, my dog is a little', 'Today, I', ] A_ : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase ) A_ : List[str] = inputs['input_ids'] A_ : Optional[int] = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] ) A_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids A_ : Any = model.generate(input_ids=lowercase ) A_ : Any = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs['attention_mask'][-1] , tf.intaa ) ) A_ : List[str] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids A_ : Optional[Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings ) A_ : Tuple = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) A_ : List[str] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase ) A_ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase ) A_ : List[Any] = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(lowercase , lowercase ) self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = 'facebook/opt-350m' A_ : Any = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] A_ : List[Any] = [] A_ : List[Any] = GPTaTokenizer.from_pretrained(lowercase ) A_ : int = TFOPTForCausalLM.from_pretrained(lowercase ) for prompt in self.prompts: A_ : int = tokenizer(lowercase , return_tensors='tf' ).input_ids A_ : int = model.generate(lowercase , max_length=1_0 ) A_ : str = 
tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase ) predicted_outputs += generated_string self.assertListEqual(lowercase , lowercase )
711
import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa _UpperCAmelCase = logging.getLogger(__name__) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''summarization''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ROUGE_KEYS lowerCamelCase_ = '''rouge2''' def __init__( self , lowercase , **lowercase ): """simple docstring""" if hparams.sortish_sampler and hparams.gpus > 1: A_ : str = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError('Dynamic Batch size does not work for multi-gpu training' ) if hparams.sortish_sampler: raise ValueError('--sortish_sampler and --max_tokens_per_batch may not be used simultaneously' ) super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase ) use_task_specific_params(self.model , 'summarization' ) save_git_info(self.hparams.output_dir ) A_ : List[str] = Path(self.output_dir ) / 'metrics.json' A_ : List[str] = Path(self.output_dir ) / 'hparams.pkl' pickle_save(self.hparams , self.hparams_save_path ) A_ : str = 0 A_ : Any = defaultdict(lowercase ) A_ : Union[str, Any] = self.config.model_type A_ : int = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size A_ : dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } A_ : Optional[Any] = { 'train': self.hparams.n_train, 'val': self.hparams.n_val, 'test': self.hparams.n_test, } A_ : List[str] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} A_ : Tuple = { 'train': self.hparams.max_target_length, 'val': self.hparams.val_max_target_length, 'test': self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}''' assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}''' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) A_ : int = get_git_info()['repo_sha'] A_ : int = hparams.num_workers A_ : Union[str, Any] = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ): A_ : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang] A_ : Any = self.decoder_start_token_id A_ : str = ( SeqaSeqDataset if hasattr(self.tokenizer , 'prepare_seq2seq_batch' ) else LegacySeqaSeqDataset ) A_ : Union[str, Any] = False A_ : Tuple = self.model.config.num_beams 
if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: A_ : int = self.hparams.eval_max_gen_length else: A_ : List[Any] = self.model.config.max_length A_ : List[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = { k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items() } save_json(lowercase , Path(self.output_dir ) / 'text_batch.json' ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / 'tok_batch.json' ) A_ : int = True return readable_batch def lowerCAmelCase_ ( self , lowercase , **lowercase ): """simple docstring""" return self.model(lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[Any] = self.tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) return lmap(str.strip , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = self.tokenizer.pad_token_id A_ , A_ : List[str] = batch['input_ids'], batch['attention_mask'] A_ : str = batch['labels'] if isinstance(self.model , lowercase ): A_ : Optional[int] = self.model._shift_right(lowercase ) else: A_ : Any = shift_tokens_right(lowercase , lowercase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero A_ : Optional[Any] = decoder_input_ids self.save_readable_batch(lowercase ) A_ : List[str] = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase ) A_ : Dict = outputs['logits'] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id A_ : Union[str, Any] = nn.CrossEntropyLoss(ignore_index=lowercase ) assert lm_logits.shape[-1] == self.vocab_size A_ : Any = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: A_ : List[Any] = nn.functional.log_softmax(lowercase , dim=-1 ) A_ , A_ : Any = label_smoothed_nll_loss( lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase ) return (loss,) @property def lowerCAmelCase_ ( self ): """simple docstring""" return self.tokenizer.pad_token_id def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : str = self._step(lowercase ) A_ : Optional[int] = dict(zip(self.loss_names , lowercase ) ) # tokens per batch A_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum() A_ : str = batch['input_ids'].shape[0] A_ : Any = batch['input_ids'].eq(self.pad ).sum() A_ : Optional[int] = batch['input_ids'].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase="val" ): """simple docstring""" self.step_count += 1 A_ : Union[str, Any] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} A_ : Dict = losses['loss'] A_ : int = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len'] } A_ : Any = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) A_ : torch.FloatTensor = torch.tensor(lowercase ).type_as(lowercase ) 
generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase ) A_ : Tuple = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()} A_ : Tuple = self.step_count self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path A_ : Dict = flatten_list([x['preds'] for x in outputs] ) return { "log": all_metrics, "preds": preds, F'''{prefix}_loss''': loss, F'''{prefix}_{self.val_metric}''': metric_tensor, } def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_rouge(lowercase , lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') A_ : Optional[int] = self.model.generate( batch['input_ids'] , attention_mask=batch['attention_mask'] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) A_ : int = (time.time() - ta) / batch['input_ids'].shape[0] A_ : List[str] = self.ids_to_clean_text(lowercase ) A_ : List[str] = self.ids_to_clean_text(batch['labels'] ) A_ : List[Any] = self._step(lowercase ) A_ : int = dict(zip(self.loss_names , lowercase ) ) A_ : Dict = self.calc_generative_metrics(lowercase , lowercase ) A_ : List[Any] = np.mean(lmap(lowercase , lowercase ) ) base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase ) return base_metrics def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return self._generative_step(lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return self.validation_epoch_end(lowercase , prefix='test' ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : str = self.n_obs[type_path] A_ : List[Any] = self.target_lens[type_path] A_ : str = self.dataset_class( self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , ) return dataset def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase = False ): """simple docstring""" A_ : Optional[int] = self.get_dataset(lowercase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": A_ : str = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": A_ : str = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.get_dataloader('train' , batch_size=self.hparams.train_batch_size , shuffle=lowercase ) return dataloader def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('val' , batch_size=self.hparams.eval_batch_size ) def lowerCAmelCase_ ( self ): """simple docstring""" return self.get_dataloader('test' , batch_size=self.hparams.eval_batch_size ) @staticmethod def lowerCAmelCase_ ( lowercase , 
lowercase ): """simple docstring""" BaseTransformer.add_model_specific_args(lowercase , lowercase ) add_generic_args(lowercase , lowercase ) parser.add_argument( '--max_source_length' , default=1_0_2_4 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--max_target_length' , default=5_6 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--val_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument( '--test_max_target_length' , default=1_4_2 , type=lowercase , help=( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) , ) parser.add_argument('--freeze_encoder' , action='store_true' ) parser.add_argument('--freeze_embeds' , action='store_true' ) parser.add_argument('--sortish_sampler' , action='store_true' , default=lowercase ) parser.add_argument('--overwrite_output_dir' , action='store_true' , default=lowercase ) parser.add_argument('--max_tokens_per_batch' , type=lowercase , default=lowercase ) parser.add_argument('--logger_name' , type=lowercase , choices=['default', 'wandb', 'wandb_shared'] , default='default' ) parser.add_argument('--n_train' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_val' , type=lowercase , default=5_0_0 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--n_test' , type=lowercase , default=-1 , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument( '--task' , type=lowercase , default='summarization' , required=lowercase , help='# examples. -1 means use all.' ) parser.add_argument('--label_smoothing' , type=lowercase , default=0.0 , required=lowercase ) parser.add_argument('--src_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--tgt_lang' , type=lowercase , default='' , required=lowercase ) parser.add_argument('--eval_beams' , type=lowercase , default=lowercase , required=lowercase ) parser.add_argument( '--val_metric' , type=lowercase , default=lowercase , required=lowercase , choices=['bleu', 'rouge2', 'loss', None] ) parser.add_argument('--eval_max_gen_length' , type=lowercase , default=lowercase , help='never generate more than n tokens' ) parser.add_argument('--save_top_k' , type=lowercase , default=1 , required=lowercase , help='How many checkpoints to save' ) parser.add_argument( '--early_stopping_patience' , type=lowercase , default=-1 , required=lowercase , help=( '-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So' ' val_check_interval will effect it.' 
) , ) return parser class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''translation''' lowerCamelCase_ = ['''loss'''] lowerCamelCase_ = ['''bleu'''] lowerCamelCase_ = '''bleu''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , **lowercase ) A_ : List[Any] = hparams.src_lang A_ : str = hparams.tgt_lang def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return calculate_bleu(lowercase , lowercase ) def UpperCamelCase ( __lowercase : Optional[int] ,__lowercase : Tuple=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=__lowercase ) check_output_dir(__lowercase ,expected_items=3 ) if model is None: if "summarization" in args.task: A_ : SummarizationModule = SummarizationModule(__lowercase ) else: A_ : SummarizationModule = TranslationModule(__lowercase ) A_ : Optional[int] = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith('/tmp' ) or str(args.output_dir ).startswith('/var' ) ): A_ : List[str] = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger A_ : List[str] = os.environ.get('WANDB_PROJECT' ,__lowercase ) A_ : List[Any] = WandbLogger(name=model.output_dir.name ,project=__lowercase ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger A_ : str = WandbLogger(name=model.output_dir.name ,project=f'''hf_{dataset}''' ) if args.early_stopping_patience >= 0: A_ : Dict = get_early_stopping_callback(model.val_metric ,args.early_stopping_patience ) else: A_ : str = False A_ : Dict = args.val_metric == 'loss' A_ : pl.Trainer = generic_train( __lowercase ,__lowercase ,logging_callback=SeqaSeqLoggingCallback() ,checkpoint_callback=get_checkpoint_callback( args.output_dir ,model.val_metric ,args.save_top_k ,__lowercase ) ,early_stopping_callback=__lowercase ,logger=__lowercase ,) pickle_save(model.hparams ,model.output_dir / 'hparams.pkl' ) if not args.do_predict: return model A_ : Optional[Any] = '' A_ : Optional[Any] = sorted(glob.glob(os.path.join(args.output_dir ,'*.ckpt' ) ,recursive=__lowercase ) ) if checkpoints: A_ : List[Any] = checkpoints[-1] A_ : Any = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() _UpperCAmelCase = pl.Trainer.add_argparse_args(parser) _UpperCAmelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd()) _UpperCAmelCase = parser.parse_args() main(args)
70
0
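The Lightning finetuning script that closes above threads every hyperparameter through a static `add_model_specific_args` hook, so each module registers only its own flags on a shared parser. A minimal sketch of that pattern (class and flag names here are illustrative, not the script's own):

from argparse import ArgumentParser


class ToySummarizer:
    def __init__(self, hparams):
        self.hparams = hparams

    @staticmethod
    def add_model_specific_args(parser: ArgumentParser) -> ArgumentParser:
        # Each module extends the shared parser with only the flags it owns.
        parser.add_argument('--max_source_length', type=int, default=1024)
        parser.add_argument('--eval_beams', type=int, default=None)
        return parser


parser = ArgumentParser()
ToySummarizer.add_model_specific_args(parser)
args = parser.parse_args(['--max_source_length', '512'])
print(ToySummarizer(args).hparams.max_source_length)  # 512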
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:  # add the current shifted copy of a for every set bit of b
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Compute (a * b) % modulus, reducing the accumulator at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
712
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase=3 , lowercase=3_2 , lowercase=3 , lowercase=1_0 , lowercase=[1_0, 2_0, 3_0, 4_0] , lowercase=[1, 1, 2, 1] , lowercase=True , lowercase=True , lowercase="relu" , lowercase=3 , lowercase=None , ): """simple docstring""" A_ : List[Any] = parent A_ : Optional[Any] = batch_size A_ : Dict = image_size A_ : str = num_channels A_ : Union[str, Any] = embeddings_size A_ : Optional[Any] = hidden_sizes A_ : Any = depths A_ : List[str] = is_training A_ : int = use_labels A_ : Optional[Any] = hidden_act A_ : List[Any] = num_labels A_ : Optional[int] = scope A_ : int = len(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A_ : Union[str, Any] = None if self.use_labels: A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels ) A_ : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCAmelCase_ ( self ): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Any = TFRegNetModel(config=lowercase ) A_ : Optional[Any] = model(lowercase , training=lowercase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : int = self.num_labels A_ : Tuple = TFRegNetForImageClassification(lowercase ) A_ : List[str] = model(lowercase , labels=lowercase , training=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = self.prepare_config_and_inputs() A_ , A_ , A_ : List[Any] = config_and_inputs A_ : Dict = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowerCamelCase_ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = TFRegNetModelTester(self ) A_ : List[Any] = ConfigTester(self , 
config_class=lowercase , has_text_modality=lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def lowerCAmelCase_ ( self ): """simple docstring""" pass def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A_ : Optional[Any] = model_class(lowercase ) A_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A_ : Optional[Any] = [*signature.parameters.keys()] A_ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" def check_hidden_states_output(lowercase , lowercase , lowercase ): A_ : List[Any] = model_class(lowercase ) A_ : int = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase ) A_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states A_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowercase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[Any] = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: A_ : int = layer_type A_ : Tuple = True check_hidden_states_output(lowercase , lowercase , lowercase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A_ : Any = True check_hidden_states_output(lowercase , lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowercase , lowercase , lowercase , lowercase={} ): A_ : Tuple = model(lowercase , return_dict=lowercase , **lowercase ) A_ : Optional[Any] = model(lowercase , return_dict=lowercase , **lowercase ).to_tuple() def recursive_check(lowercase , lowercase ): if isinstance(lowercase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowercase , lowercase ): recursive_check(lowercase , lowercase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowercase , lowercase ) ) , msg=( 'Tuple and dict output are not equal. 
Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(lowercase , lowercase ) for model_class in self.all_model_classes: A_ : Dict = model_class(lowercase ) A_ : Optional[int] = self._prepare_for_class(lowercase , lowercase ) A_ : Union[str, Any] = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : List[str] = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase ) A_ : Any = self._prepare_for_class(lowercase , lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) A_ : Tuple = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) A_ : int = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase ) check_equivalence(lowercase , lowercase , lowercase , {'output_hidden_states': True} ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = TFRegNetModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) def UpperCamelCase ( ): '''simple docstring''' A_ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowerCAmelCase_ ( self ): """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) A_ : int = self.default_image_processor A_ : List[str] = prepare_img() A_ : Any = image_processor(images=lowercase , return_tensors='tf' ) # forward pass A_ : Tuple = model(**lowercase , training=lowercase ) # verify the logits A_ : int = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , lowercase ) A_ : Tuple = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
70
0
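A quick sanity check of the double-and-add multiplication routines earlier in this record (assuming the reconstructed names `binary_multiply` and `binary_mod_multiply` are in scope):

# Each iteration consumes one bit of b, so the loop body runs O(log b) times
# rather than b times as naive repeated addition would.
for a in range(-5, 6):
    for b in range(16):
        assert binary_multiply(a, b) == a * b
        assert binary_mod_multiply(a, b, 7) == (a * b) % 7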
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""", # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''sew''' def __init__( self , lowercase=3_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase=2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="group" , lowercase="gelu" , lowercase=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase=False , lowercase=1_2_8 , lowercase=1_6 , lowercase=True , lowercase=0.05 , lowercase=1_0 , lowercase=2 , lowercase=0.0 , lowercase=1_0 , lowercase=0 , lowercase="mean" , lowercase=False , lowercase=False , lowercase=2_5_6 , lowercase=0 , lowercase=1 , lowercase=2 , **lowercase , ): """simple docstring""" super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase ) A_ : Union[str, Any] = hidden_size A_ : str = feat_extract_norm A_ : int = feat_extract_activation A_ : List[str] = list(lowercase ) A_ : str = list(lowercase ) A_ : Tuple = list(lowercase ) A_ : int = conv_bias A_ : int = num_conv_pos_embeddings A_ : Dict = num_conv_pos_embedding_groups A_ : Any = len(self.conv_dim ) A_ : Optional[Any] = num_hidden_layers A_ : List[str] = intermediate_size A_ : List[str] = squeeze_factor A_ : Optional[int] = hidden_act A_ : Optional[Any] = num_attention_heads A_ : List[str] = hidden_dropout A_ : Dict = attention_dropout A_ : int = activation_dropout A_ : Any = feat_proj_dropout A_ : Any = final_dropout A_ : Dict = layerdrop A_ : str = layer_norm_eps A_ : int = initializer_range A_ : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ : Any = apply_spec_augment A_ : Union[str, Any] = mask_time_prob A_ : Optional[int] = mask_time_length A_ : int = mask_time_min_masks A_ : List[Any] = mask_feature_prob A_ : int = mask_feature_length A_ : str = mask_feature_min_masks # ctc loss A_ : Any = ctc_loss_reduction A_ : Any = ctc_zero_infinity # sequence classification A_ : Optional[Any] = use_weighted_layer_sum A_ : Any = classifier_proj_size @property def lowerCAmelCase_ ( self ): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
713
def binary_multiply(a: int, b: int) -> int:
    """Multiply a by b with the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:  # add the current shifted copy of a for every set bit of b
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Compute (a * b) % modulus, reducing the accumulator at every step."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
70
0
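The SEW configuration above derives the overall feature-extractor stride as the product of the per-layer convolution strides via `functools.reduce(operator.mul, self.conv_stride, 1)` (exposed as `inputs_to_logits_ratio` in the upstream library). With the default strides copied from the config above, this works out to 320:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)

# Each conv layer divides the time axis by its stride, so the ratio of raw
# input samples to output frames is the product of all strides: 5 * 2**6.
assert functools.reduce(operator.mul, conv_stride, 1) == 320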
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class UpperCAmelCase : '''simple docstring''' def __init__( self , lowercase , lowercase=1_3 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=9_9 , lowercase=3_2 , lowercase=2 , lowercase=4 , lowercase=3_7 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=1_6 , lowercase=2 , lowercase=0.02 , lowercase=3 , lowercase=4 , lowercase=None , lowercase=0 , ): """simple docstring""" A_ : str = parent A_ : List[Any] = batch_size A_ : List[Any] = seq_length A_ : int = is_training A_ : List[Any] = use_input_mask A_ : int = use_token_type_ids A_ : Tuple = use_labels A_ : Union[str, Any] = vocab_size A_ : Tuple = hidden_size A_ : Optional[int] = num_hidden_layers A_ : Any = num_attention_heads A_ : List[str] = intermediate_size A_ : Tuple = hidden_act A_ : Any = hidden_dropout_prob A_ : Optional[int] = attention_probs_dropout_prob A_ : Union[str, Any] = max_position_embeddings A_ : Optional[Any] = type_vocab_size A_ : int = type_sequence_label_size A_ : Union[str, Any] = initializer_range A_ : List[str] = num_labels A_ : Optional[Any] = num_choices A_ : List[Any] = scope A_ : str = projection_dim def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A_ : Tuple = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py A_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Optional[int] = None if self.use_token_type_ids: A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A_ : Optional[int] = None A_ : List[str] = None A_ : Optional[Any] = None if self.use_labels: A_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) A_ : Optional[Any] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) A_ : Dict = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Tuple = TFDPRContextEncoder(config=lowercase ) A_ : Tuple = model(lowercase , attention_mask=lowercase , 
token_type_ids=lowercase ) A_ : Tuple = model(lowercase , token_type_ids=lowercase ) A_ : str = model(lowercase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" A_ : int = TFDPRQuestionEncoder(config=lowercase ) A_ : int = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) A_ : int = model(lowercase , token_type_ids=lowercase ) A_ : Optional[Any] = model(lowercase ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ): """simple docstring""" A_ : List[Any] = TFDPRReader(config=lowercase ) A_ : Dict = model(lowercase , attention_mask=lowercase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = self.prepare_config_and_inputs() ( A_ ) : List[Any] = config_and_inputs A_ : Optional[int] = {'input_ids': input_ids} return config, inputs_dict @require_tf class UpperCAmelCase ( __A , __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) lowerCamelCase_ = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {} lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False lowerCamelCase_ = False def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFDPRModelTester(self ) A_ : Optional[Any] = ConfigTester(self , config_class=lowercase , hidden_size=3_7 ) def lowerCAmelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = TFDPRContextEncoder.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[int] = TFDPRContextEncoder.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = TFDPRQuestionEncoder.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Optional[Any] = TFDPRReader.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = 
TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' ) A_ : Tuple = tf.constant( [[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP] A_ : List[str] = model(lowercase )[0] # embedding shape = (1, 768) # compare the actual values for a slice. A_ : List[str] = tf.constant( [ [ 0.0323_6253, 0.1275_3335, 0.1681_8509, 0.0027_9786, 0.389_6933, 0.2426_4945, 0.217_8971, -0.0233_5227, -0.0848_1959, -0.1432_4117, ] ] ) self.assertTrue(numpy.allclose(output[:, :1_0].numpy() , expected_slice.numpy() , atol=1E-4 ) )
714
def hexagonal_numbers(length: int) -> list[int]:
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    # Check the type first so that non-int inputs raise ValueError instead of
    # failing on the comparison with a TypeError.
    if not isinstance(length, int) or length <= 0:
        raise ValueError('Length must be a positive integer.')
    return [n * (2 * n - 1) for n in range(length)]


if __name__ == "__main__":
    print(hexagonal_numbers(length=5))
    print(hexagonal_numbers(length=10))
70
0
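The hexagonal-number helper earlier in this record uses h(n) = n(2n − 1). One identity worth noting: every hexagonal number is also a triangular number, since T(2n−1) = (2n−1)(2n)/2 = n(2n−1). A quick check:

def triangular(k: int) -> int:
    return k * (k + 1) // 2


for n in range(1, 1000):
    assert n * (2 * n - 1) == triangular(2 * n - 1)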
import warnings from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = { """nvidia/segformer-b0-finetuned-ade-512-512""": ( """https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json""" ), # See all SegFormer models at https://huggingface.co/models?filter=segformer } class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = '''segformer''' def __init__( self , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[3_2, 6_4, 1_6_0, 2_5_6] , lowercase=[7, 3, 3, 3] , lowercase=[4, 2, 2, 2] , lowercase=[1, 2, 5, 8] , lowercase=[4, 4, 4, 4] , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.1 , lowercase=0.02 , lowercase=0.1 , lowercase=1E-6 , lowercase=2_5_6 , lowercase=2_5_5 , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False: warnings.warn( 'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be' ' removed, as the behaviour will default to that of reshape_last_stage = True.' , lowercase , ) A_ : Optional[int] = num_channels A_ : int = num_encoder_blocks A_ : Optional[int] = depths A_ : Optional[int] = sr_ratios A_ : int = hidden_sizes A_ : List[str] = patch_sizes A_ : Any = strides A_ : Union[str, Any] = mlp_ratios A_ : str = num_attention_heads A_ : List[str] = hidden_act A_ : Any = hidden_dropout_prob A_ : Optional[Any] = attention_probs_dropout_prob A_ : Union[str, Any] = classifier_dropout_prob A_ : Dict = initializer_range A_ : Any = drop_path_rate A_ : Dict = layer_norm_eps A_ : Optional[int] = decoder_hidden_size A_ : Any = kwargs.get('reshape_last_stage' , lowercase ) A_ : Tuple = semantic_loss_ignore_index class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = version.parse('''1.11''' ) @property def lowerCAmelCase_ ( self ): """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def lowerCAmelCase_ ( self ): """simple docstring""" return 1E-4 @property def lowerCAmelCase_ ( self ): """simple docstring""" return 1_2
715
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node_a, node_b, cost in edges:
        # The graph is undirected, so record each edge in both directions.
        adjacency[node_a].append([node_b, cost])
        adjacency[node_b].append([node_a, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        # Accept each expected edge in either orientation.
        assert edge in result or reverse in result
70
0
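The Prim's-algorithm test above has to accept each expected edge in either orientation because the graph is undirected. An alternative (the helper name is mine) is to normalize edges to frozensets, which makes the containment check orientation-free:

def normalize(edges):
    """Map undirected edges to orientation-independent keys."""
    return {frozenset(edge[:2]) for edge in edges}


assert normalize([[6, 7, 1], [8, 2, 2]]) <= normalize([(7, 6), (2, 8), (6, 5)])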
import pytest

import datasets.config
from datasets.utils.info_utils import is_small_dataset


@pytest.mark.parametrize('dataset_size', [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize('input_in_memory_max_size', ['default', 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, 'IN_MEMORY_MAX_SIZE', input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
716
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser('Accelerate CLI tool', usage='accelerate <command> [<args>]', allow_abbrev=False)
    subparsers = parser.add_subparsers(help='accelerate command helpers')

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
70
0
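The Accelerate CLI entry point above relies on the standard argparse subcommand dispatch: each `*_command_parser` registers a handler through `set_defaults(func=...)`, and `main` calls `args.func(args)` only when a subcommand matched. A self-contained miniature of the same pattern:

from argparse import ArgumentParser


def do_env(args):
    print('env called, verbose =', args.verbose)


parser = ArgumentParser('demo', allow_abbrev=False)
subparsers = parser.add_subparsers(help='subcommands')
env_parser = subparsers.add_parser('env')
env_parser.add_argument('--verbose', action='store_true')
env_parser.set_defaults(func=do_env)  # this is what hasattr(args, 'func') detects

args = parser.parse_args(['env', '--verbose'])
if hasattr(args, 'func'):
    args.func(args)
else:
    parser.print_help()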
import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self ): """simple docstring""" super().__init__() A_ : Tuple = nn.Linear(3 , 4 ) A_ : Dict = nn.BatchNormad(4 ) A_ : Optional[Any] = nn.Linear(4 , 5 ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(lowercase ) ) ) class UpperCAmelCase ( __A ): '''simple docstring''' def lowerCAmelCase_ ( self , lowercase , *lowercase , **lowercase ): """simple docstring""" return (args[0] + 1,) + args[1:], kwargs class UpperCAmelCase ( __A ): '''simple docstring''' def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" return output + 1 class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = ModelForTest() A_ : Any = ModelHook() add_hook_to_module(lowercase , lowercase ) self.assertEqual(test_model._hf_hook , lowercase ) self.assertTrue(hasattr(lowercase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowercase ) self.assertFalse(hasattr(lowercase , '_hf_hook' ) ) self.assertFalse(hasattr(lowercase , '_old_forward' ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = ModelForTest() A_ : Tuple = ModelHook() add_hook_to_module(lowercase , lowercase ) add_hook_to_module(lowercase , lowercase , append=lowercase ) self.assertEqual(isinstance(test_model._hf_hook , lowercase ) , lowercase ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(lowercase , '_old_forward' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , 'forward' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['x'] ) remove_hook_from_module(lowercase ) self.assertFalse(hasattr(lowercase , '_hf_hook' ) ) self.assertFalse(hasattr(lowercase , '_old_forward' ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = ModelForTest() A_ : List[Any] = torch.randn(2 , 3 ) A_ : str = test_model(x + 1 ) A_ : Tuple = test_model(x + 2 ) A_ : Any = PreForwardHook() add_hook_to_module(lowercase , lowercase ) A_ : str = test_model(lowercase ) self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A_ : Optional[Any] = PreForwardHook() add_hook_to_module(lowercase , lowercase ) A_ : Any = test_model(lowercase ) self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks A_ : Tuple = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(lowercase , lowercase ) A_ : List[str] = test_model(lowercase ) assert torch.allclose(lowercase , lowercase , atol=1E-5 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = ModelForTest() A_ : Dict = torch.randn(2 , 3 ) A_ : List[str] = test_model(lowercase ) A_ : Union[str, Any] = PostForwardHook() add_hook_to_module(lowercase , lowercase ) A_ : List[str] = 
test_model(lowercase ) self.assertTrue(torch.allclose(lowercase , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain A_ : Dict = PostForwardHook() add_hook_to_module(lowercase , lowercase ) A_ : int = test_model(lowercase ) self.assertTrue(torch.allclose(lowercase , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks A_ : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(lowercase , lowercase ) A_ : str = test_model(lowercase ) assert torch.allclose(lowercase , output + 2 , atol=1E-5 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = ModelForTest() A_ : Optional[Any] = torch.randn(2 , 3 ) A_ : Optional[int] = test_model(lowercase ) A_ : List[Any] = PostForwardHook() add_hook_to_module(lowercase , lowercase ) A_ : Optional[Any] = test_model(lowercase ) self.assertTrue(torch.allclose(lowercase , output + 1 ) ) self.assertTrue(outputa.requires_grad ) A_ : List[str] = True A_ : Any = test_model(lowercase ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[str] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device A_ : Tuple = torch.randn(2 , 3 ) A_ : List[str] = model(lowercase ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(lowercase , AlignDevicesHook(io_same_device=lowercase ) ) A_ : Dict = torch.randn(2 , 3 ).to(0 ) A_ : List[Any] = model(lowercase ) self.assertEqual(output.device , torch.device(0 ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Any = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices A_ : List[Any] = {'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True} add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device A_ : Any = torch.device(hook_kwargs['execution_device'] ) self.assertEqual(model.batchnorm.running_mean.device , lowercase ) A_ : str = torch.randn(2 , 3 ) A_ : Optional[Any] = model(lowercase ) self.assertEqual(output.device , lowercase ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload A_ : Union[str, Any] = { 'execution_device': 0 if torch.cuda.is_available() else 'cpu', 'offload': True, 'offload_buffers': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**lowercase ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**lowercase ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) A_ : str = torch.randn(2 , 3 ) A_ : Union[str, Any] = model(lowercase ) self.assertEqual(output.device , lowercase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices A_ : Optional[int] = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook(lowercase , execution_device=lowercase , offload=lowercase ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device A_ : str = torch.device(lowercase ) self.assertEqual(model.batchnorm.running_mean.device , lowercase ) A_ : Dict = torch.randn(2 , 3 ) A_ : str = model(lowercase ) self.assertEqual(output.device , lowercase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook(lowercase , execution_device=lowercase , offload=lowercase , offload_buffers=lowercase ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) A_ : List[str] = torch.randn(2 , 3 ) A_ : Any = model(lowercase ) self.assertEqual(output.device , lowercase ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(lowercase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # This will move each submodule on different devices A_ : Dict = 0 if torch.cuda.is_available() else 'cpu' attach_align_device_hook( lowercase , execution_device=lowercase , offload=lowercase , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) # Buffers are not included in the offload by default, so are on the execution device A_ : str = torch.device(lowercase ) self.assertEqual(model.batchnorm.running_mean.device , lowercase ) A_ : Tuple = torch.randn(2 , 3 ) A_ : int = model(lowercase ) self.assertEqual(output.device , lowercase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) # Now test with buffers included in the offload attach_align_device_hook( lowercase , execution_device=lowercase , offload=lowercase , weights_map=model.state_dict() , offload_buffers=lowercase , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('meta' ) ) self.assertEqual(model.lineara.weight.device , torch.device('meta' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('meta' ) ) A_ : str = torch.randn(2 , 3 ) A_ : Union[str, Any] = model(lowercase ) self.assertEqual(output.device , lowercase ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(lowercase ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('cpu' ) ) self.assertEqual(model.lineara.weight.device , torch.device('cpu' ) )
717
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-uncased')

        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # Single sequence: [CLS] text [SEP]; pair: [CLS] text [SEP] text_2 [SEP]
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
70
0
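The hooks test earlier in this record exercises pre- and post-forward hooks that wrap a module's `forward` while keeping a handle to the original (the `_old_forward` attribute the test asserts on). Stripped of the torch specifics, the wrapping mechanism looks roughly like the sketch below; this is a simplification for illustration, not Accelerate's actual `add_hook_to_module`:

import functools


class AddOnePreHook:
    def pre_forward(self, module, *args, **kwargs):
        # Rewrite positional args before the real forward runs.
        return (args[0] + 1,) + args[1:], kwargs


def add_hook(module, hook):
    old_forward = module.forward  # kept around, like the test's `_old_forward`

    @functools.wraps(old_forward)
    def new_forward(*args, **kwargs):
        args, kwargs = hook.pre_forward(module, *args, **kwargs)
        return old_forward(*args, **kwargs)

    module.forward = new_forward
    return module


class Doubler:
    def forward(self, x):
        return 2 * x


print(add_hook(Doubler(), AddOnePreHook()).forward(3))  # 8: the hook turned 3 into 4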
'''simple docstring''' from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig _UpperCAmelCase = logging.get_logger(__name__) # General docstring _UpperCAmelCase = """RegNetConfig""" # Base docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = [1, 1088, 7, 7] # Image classification docstring _UpperCAmelCase = """facebook/regnet-y-040""" _UpperCAmelCase = """tabby, tabby cat""" _UpperCAmelCase = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 3 , lowercase = 1 , lowercase = 1 , lowercase = "relu" , **lowercase , ): """simple docstring""" super().__init__(**lowercase ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb A_ : int = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=lowercase , strides=lowercase , padding='VALID' , groups=lowercase , use_bias=lowercase , name='convolution' , ) A_ : Any = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) A_ : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : List[str] = self.convolution(self.padding(lowercase ) ) A_ : List[str] = self.normalization(lowercase ) A_ : List[Any] = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[int] = config.num_channels A_ : str = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = shape_list(lowercase )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( 'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. 
# shape = (batch_size, in_height, in_width, in_channels=num_channels) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 2, 3, 1) ) A_ : Optional[int] = self.embedder(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.ConvaD( filters=lowercase , kernel_size=1 , strides=lowercase , use_bias=lowercase , name='convolution' ) A_ : str = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' ) def lowerCAmelCase_ ( self , lowercase , lowercase = False ): """simple docstring""" return self.normalization(self.convolution(lowercase ) , training=lowercase ) class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : int = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) A_ : Optional[Any] = [ tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='relu' , name='attention.0' ), tf.keras.layers.ConvaD(filters=lowercase , kernel_size=1 , activation='sigmoid' , name='attention.2' ), ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = self.pooler(lowercase ) for layer_module in self.attention: A_ : Optional[Any] = layer_module(lowercase ) A_ : Optional[int] = hidden_state * pooled return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : Optional[int] = max(1 , out_channels // config.groups_width ) A_ : List[Any] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. 
A_ : Optional[int] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.2' ), ] A_ : List[str] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Union[str, Any] = hidden_state for layer_module in self.layers: A_ : int = layer_module(lowercase ) A_ : Union[str, Any] = self.shortcut(lowercase ) hidden_state += residual A_ : Dict = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : str = in_channels != out_channels or stride != 1 A_ : int = max(1 , out_channels // config.groups_width ) A_ : Optional[int] = ( TFRegNetShortCut(lowercase , stride=lowercase , name='shortcut' ) if should_apply_shortcut else tf.keras.layers.Activation('linear' , name='shortcut' ) ) A_ : List[str] = [ TFRegNetConvLayer(lowercase , kernel_size=1 , activation=config.hidden_act , name='layer.0' ), TFRegNetConvLayer( lowercase , stride=lowercase , groups=lowercase , activation=config.hidden_act , name='layer.1' ), TFRegNetSELayer(lowercase , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ), TFRegNetConvLayer(lowercase , kernel_size=1 , activation=lowercase , name='layer.3' ), ] A_ : Union[str, Any] = ACTaFN[config.hidden_act] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Dict = hidden_state for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) A_ : int = self.shortcut(lowercase ) hidden_state += residual A_ : str = self.activation(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 2 , lowercase = 2 , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Tuple = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer A_ : Tuple = [ # downsampling is done in the first layer with stride of 2 layer(lowercase , lowercase , lowercase , stride=lowercase , name='layers.0' ), *[layer(lowercase , lowercase , lowercase , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )], ] def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" for layer_module in self.layers: A_ : Tuple = layer_module(lowercase ) return hidden_state class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : List[str] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( lowercase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) ) A_ : Tuple = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase , config.depths[1:] ) ): self.stages.append(TFRegNetStage(lowercase , lowercase , lowercase , depth=lowercase , name=F'''stages.{i+1}''' ) ) def lowerCAmelCase_ ( self , lowercase , lowercase = False , lowercase = True ): """simple docstring""" A_ : Tuple = () if 
output_hidden_states else None for stage_module in self.stages: if output_hidden_states: A_ : Dict = hidden_states + (hidden_state,) A_ : List[Any] = stage_module(lowercase ) if output_hidden_states: A_ : Union[str, Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase , hidden_states=lowercase ) @keras_serializable class UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' lowerCamelCase_ = RegNetConfig def __init__( self , lowercase , **lowercase ): """simple docstring""" super().__init__(**lowercase ) A_ : Optional[Any] = config A_ : int = TFRegNetEmbeddings(lowercase , name='embedder' ) A_ : str = TFRegNetEncoder(lowercase , name='encoder' ) A_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase , name='pooler' ) @unpack_inputs def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = False , ): """simple docstring""" A_ : Optional[int] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : Dict = return_dict if return_dict is not None else self.config.use_return_dict A_ : Union[str, Any] = self.embedder(lowercase , training=lowercase ) A_ : Optional[int] = self.encoder( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Dict = encoder_outputs[0] A_ : List[Any] = self.pooler(lowercase ) # Change to NCHW output format have uniformity in the modules A_ : Union[str, Any] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) A_ : Optional[int] = tf.transpose(lowercase , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: A_ : int = tuple([tf.transpose(lowercase , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase , pooler_output=lowercase , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase ( __A ): '''simple docstring''' lowerCamelCase_ = RegNetConfig lowerCamelCase_ = '''regnet''' lowerCamelCase_ = '''pixel_values''' @property def lowerCAmelCase_ ( self ): """simple docstring""" return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) , dtype=tf.floataa )} _UpperCAmelCase = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ _UpperCAmelCase = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , ) class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : int = TFRegNetMainLayer(lowercase , name='regnet' ) @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : Tuple = self.regnet( pixel_values=lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. ''' , __A , ) class UpperCAmelCase ( __A , __A ): '''simple docstring''' def __init__( self , lowercase , *lowercase , **lowercase ): """simple docstring""" super().__init__(lowercase , *lowercase , **lowercase ) A_ : List[Any] = config.num_labels A_ : Optional[Any] = TFRegNetMainLayer(lowercase , name='regnet' ) # classification head A_ : Union[str, Any] = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(lowercase ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase_ ( self , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase=False , ): """simple docstring""" A_ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) A_ : int = return_dict if return_dict is not None else self.config.use_return_dict A_ : List[Any] = self.regnet( lowercase , output_hidden_states=lowercase , return_dict=lowercase , training=lowercase ) A_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1] A_ : List[Any] = self.classifier[0](lowercase ) A_ : Union[str, Any] = self.classifier[1](lowercase ) A_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase , logits=lowercase ) if not return_dict: A_ : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=lowercase , logits=lowercase , hidden_states=outputs.hidden_states )
718
import random def UpperCamelCase ( __lowercase : int ): '''simple docstring''' A_ : Tuple = num - 1 A_ : Optional[Any] = 0 while s % 2 == 0: A_ : Optional[int] = s // 2 t += 1 for _ in range(5 ): A_ : Optional[int] = random.randrange(2 ,num - 1 ) A_ : Any = pow(__lowercase ,__lowercase ,__lowercase ) if v != 1: A_ : List[str] = 0 while v != (num - 1): if i == t - 1: return False else: A_ : Union[str, Any] = i + 1 A_ : Tuple = (v**2) % num return True def UpperCamelCase ( __lowercase : int ): '''simple docstring''' if num < 2: return False A_ : Optional[Any] = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 1_01, 1_03, 1_07, 1_09, 1_13, 1_27, 1_31, 1_37, 1_39, 1_49, 1_51, 1_57, 1_63, 1_67, 1_73, 1_79, 1_81, 1_91, 1_93, 1_97, 1_99, 2_11, 2_23, 2_27, 2_29, 2_33, 2_39, 2_41, 2_51, 2_57, 2_63, 2_69, 2_71, 2_77, 2_81, 2_83, 2_93, 3_07, 3_11, 3_13, 3_17, 3_31, 3_37, 3_47, 3_49, 3_53, 3_59, 3_67, 3_73, 3_79, 3_83, 3_89, 3_97, 4_01, 4_09, 4_19, 4_21, 4_31, 4_33, 4_39, 4_43, 4_49, 4_57, 4_61, 4_63, 4_67, 4_79, 4_87, 4_91, 4_99, 5_03, 5_09, 5_21, 5_23, 5_41, 5_47, 5_57, 5_63, 5_69, 5_71, 5_77, 5_87, 5_93, 5_99, 6_01, 6_07, 6_13, 6_17, 6_19, 6_31, 6_41, 6_43, 6_47, 6_53, 6_59, 6_61, 6_73, 6_77, 6_83, 6_91, 7_01, 7_09, 7_19, 7_27, 7_33, 7_39, 7_43, 7_51, 7_57, 7_61, 7_69, 7_73, 7_87, 7_97, 8_09, 8_11, 8_21, 8_23, 8_27, 8_29, 8_39, 8_53, 8_57, 8_59, 8_63, 8_77, 8_81, 8_83, 8_87, 9_07, 9_11, 9_19, 9_29, 9_37, 9_41, 9_47, 9_53, 9_67, 9_71, 9_77, 9_83, 9_91, 9_97, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(__lowercase ) def UpperCamelCase ( __lowercase : int = 10_24 ): '''simple docstring''' while True: A_ : Union[str, Any] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) ) if is_prime_low_num(__lowercase ): return num if __name__ == "__main__": _UpperCAmelCase = generate_large_prime() print(("""Prime number:""", num)) print(("""is_prime_low_num:""", is_prime_low_num(num)))
70
0
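# The obfuscated Miller-Rabin snippet above no longer carries its local names
# (its pow() call ends up with three identical arguments), so here is a
# minimal, self-contained sketch of the same probabilistic primality test.
# The names s, t, witness, and v are illustrative assumptions, not the
# original identifiers.
import random

def is_probable_prime(num: int, rounds: int = 5) -> bool:
    if num < 2:
        return False
    if num in (2, 3):
        return True
    if num % 2 == 0:
        return False
    # Write num - 1 as s * 2**t with s odd.
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(rounds):
        witness = random.randrange(2, num - 1)
        v = pow(witness, s, num)
        if v != 1:
            i = 0
            while v != num - 1:
                if i == t - 1:
                    return False
                i += 1
                v = (v * v) % num
    return True

# Usage sketch: is_probable_prime(97) is True; is_probable_prime(100) is False.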
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _UpperCAmelCase = logging.get_logger(__name__) _UpperCAmelCase = OrderedDict( [ ("""align""", """EfficientNetImageProcessor"""), ("""beit""", """BeitImageProcessor"""), ("""bit""", """BitImageProcessor"""), ("""blip""", """BlipImageProcessor"""), ("""blip-2""", """BlipImageProcessor"""), ("""bridgetower""", """BridgeTowerImageProcessor"""), ("""chinese_clip""", """ChineseCLIPImageProcessor"""), ("""clip""", """CLIPImageProcessor"""), ("""clipseg""", """ViTImageProcessor"""), ("""conditional_detr""", """ConditionalDetrImageProcessor"""), ("""convnext""", """ConvNextImageProcessor"""), ("""convnextv2""", """ConvNextImageProcessor"""), ("""cvt""", """ConvNextImageProcessor"""), ("""data2vec-vision""", """BeitImageProcessor"""), ("""deformable_detr""", """DeformableDetrImageProcessor"""), ("""deit""", """DeiTImageProcessor"""), ("""deta""", """DetaImageProcessor"""), ("""detr""", """DetrImageProcessor"""), ("""dinat""", """ViTImageProcessor"""), ("""donut-swin""", """DonutImageProcessor"""), ("""dpt""", """DPTImageProcessor"""), ("""efficientformer""", """EfficientFormerImageProcessor"""), ("""efficientnet""", """EfficientNetImageProcessor"""), ("""flava""", """FlavaImageProcessor"""), ("""focalnet""", """BitImageProcessor"""), ("""git""", """CLIPImageProcessor"""), ("""glpn""", """GLPNImageProcessor"""), ("""groupvit""", """CLIPImageProcessor"""), ("""imagegpt""", """ImageGPTImageProcessor"""), ("""instructblip""", """BlipImageProcessor"""), ("""layoutlmv2""", """LayoutLMv2ImageProcessor"""), ("""layoutlmv3""", """LayoutLMv3ImageProcessor"""), ("""levit""", """LevitImageProcessor"""), ("""mask2former""", """Mask2FormerImageProcessor"""), ("""maskformer""", """MaskFormerImageProcessor"""), ("""mgp-str""", """ViTImageProcessor"""), ("""mobilenet_v1""", """MobileNetV1ImageProcessor"""), ("""mobilenet_v2""", """MobileNetV2ImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevitv2""", """MobileViTImageProcessor"""), ("""nat""", """ViTImageProcessor"""), ("""oneformer""", """OneFormerImageProcessor"""), ("""owlvit""", """OwlViTImageProcessor"""), ("""perceiver""", """PerceiverImageProcessor"""), ("""pix2struct""", """Pix2StructImageProcessor"""), ("""poolformer""", """PoolFormerImageProcessor"""), ("""regnet""", """ConvNextImageProcessor"""), ("""resnet""", """ConvNextImageProcessor"""), ("""sam""", """SamImageProcessor"""), ("""segformer""", """SegformerImageProcessor"""), ("""swiftformer""", """ViTImageProcessor"""), ("""swin""", """ViTImageProcessor"""), ("""swin2sr""", """Swin2SRImageProcessor"""), ("""swinv2""", """ViTImageProcessor"""), ("""table-transformer""", """DetrImageProcessor"""), ("""timesformer""", """VideoMAEImageProcessor"""), ("""tvlt""", """TvltImageProcessor"""), ("""upernet""", """SegformerImageProcessor"""), ("""van""", """ConvNextImageProcessor"""), ("""videomae""", 
"""VideoMAEImageProcessor"""), ("""vilt""", """ViltImageProcessor"""), ("""vit""", """ViTImageProcessor"""), ("""vit_hybrid""", """ViTHybridImageProcessor"""), ("""vit_mae""", """ViTImageProcessor"""), ("""vit_msn""", """ViTImageProcessor"""), ("""xclip""", """CLIPImageProcessor"""), ("""yolos""", """YolosImageProcessor"""), ] ) _UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def UpperCamelCase ( __lowercase : str ): '''simple docstring''' for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: A_ : Tuple = model_type_to_module_name(__lowercase ) A_ : List[Any] = importlib.import_module(f'''.{module_name}''' ,'transformers.models' ) try: return getattr(__lowercase ,__lowercase ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(__lowercase ,'__name__' ,__lowercase ) == class_name: return extractor # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. A_ : str = importlib.import_module('transformers' ) if hasattr(__lowercase ,__lowercase ): return getattr(__lowercase ,__lowercase ) return None def UpperCamelCase ( __lowercase : Union[str, os.PathLike] ,__lowercase : Optional[Union[str, os.PathLike]] = None ,__lowercase : bool = False ,__lowercase : bool = False ,__lowercase : Optional[Dict[str, str]] = None ,__lowercase : Optional[Union[bool, str]] = None ,__lowercase : Optional[str] = None ,__lowercase : bool = False ,**__lowercase : Dict ,): '''simple docstring''' A_ : Optional[Any] = get_file_from_repo( __lowercase ,__lowercase ,cache_dir=__lowercase ,force_download=__lowercase ,resume_download=__lowercase ,proxies=__lowercase ,use_auth_token=__lowercase ,revision=__lowercase ,local_files_only=__lowercase ,) if resolved_config_file is None: logger.info( 'Could not locate the image processor configuration file, will try to use the model config instead.' ) return {} with open(__lowercase ,encoding='utf-8' ) as reader: return json.load(__lowercase ) class UpperCAmelCase : '''simple docstring''' def __init__( self ): """simple docstring""" raise EnvironmentError( 'AutoImageProcessor is designed to be instantiated ' 'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' ) @classmethod @replace_list_option_in_docstrings(lowercase ) def lowerCAmelCase_ ( cls , lowercase , **lowercase ): """simple docstring""" A_ : List[Any] = kwargs.pop('config' , lowercase ) A_ : Dict = kwargs.pop('trust_remote_code' , lowercase ) A_ : Optional[Any] = True A_ : Optional[Any] = ImageProcessingMixin.get_image_processor_dict(lowercase , **lowercase ) A_ : Dict = config_dict.get('image_processor_type' , lowercase ) A_ : Dict = None if "AutoImageProcessor" in config_dict.get('auto_map' , {} ): A_ : Any = config_dict['auto_map']['AutoImageProcessor'] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: A_ : Tuple = config_dict.pop('feature_extractor_type' , lowercase ) if feature_extractor_class is not None: logger.warning( 'Could not find image processor class in the image processor config or the model config. Loading' ' based on pattern matching with the model\'s feature extractor configuration.' 
) A_ : Optional[Any] = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' ) if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ): A_ : Union[str, Any] = config_dict['auto_map']['AutoFeatureExtractor'] A_ : Union[str, Any] = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' ) logger.warning( 'Could not find image processor auto map in the image processor config or the model config.' ' Loading based on pattern matching with the model\'s feature extractor configuration.' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(lowercase , lowercase ): A_ : Dict = AutoConfig.from_pretrained(lowercase , **lowercase ) # It could be in `config.image_processor_type` A_ : Optional[Any] = getattr(lowercase , 'image_processor_type' , lowercase ) if hasattr(lowercase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map: A_ : Tuple = config.auto_map['AutoImageProcessor'] if image_processor_class is not None: A_ : List[str] = image_processor_class_from_name(lowercase ) A_ : Optional[Any] = image_processor_auto_map is not None A_ : List[str] = image_processor_class is not None or type(lowercase ) in IMAGE_PROCESSOR_MAPPING A_ : Optional[int] = resolve_trust_remote_code( lowercase , lowercase , lowercase , lowercase ) if has_remote_code and trust_remote_code: A_ : Dict = get_class_from_dynamic_module( lowercase , lowercase , **lowercase ) A_ : Union[str, Any] = kwargs.pop('code_revision' , lowercase ) if os.path.isdir(lowercase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(lowercase , **lowercase ) elif image_processor_class is not None: return image_processor_class.from_dict(lowercase , **lowercase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(lowercase ) in IMAGE_PROCESSOR_MAPPING: A_ : Union[str, Any] = IMAGE_PROCESSOR_MAPPING[type(lowercase )] return image_processor_class.from_dict(lowercase , **lowercase ) raise ValueError( F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def lowerCAmelCase_ ( lowercase , lowercase ): """simple docstring""" IMAGE_PROCESSOR_MAPPING.register(lowercase , lowercase )
719
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCAmelCase = { """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""], """tokenization_m2m_100""": ["""M2M100Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase = [ """M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""", """M2M100ForConditionalGeneration""", """M2M100Model""", """M2M100PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
70
0
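# A small self-contained sketch of the dynamic class lookup that the
# image-processor resolution code above performs: import a module by dotted
# path and resolve an attribute by name, returning None on failure. The names
# in the usage line are ordinary stdlib names, not transformers entries.
import importlib
from typing import Optional

def class_from_name(class_name: str, module_path: str) -> Optional[type]:
    try:
        module = importlib.import_module(module_path)
    except ImportError:
        return None
    # getattr with a default avoids raising when the attribute is absent.
    return getattr(module, class_name, None)

# Usage sketch: class_from_name("OrderedDict", "collections") returns the
# class; class_from_name("Missing", "collections") returns None.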
import numpy as np import torch from imwatermark import WatermarkEncoder # Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66 _UpperCAmelCase = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0 # bin(x)[2:] gives bits of x as str, use int to convert them to 0/1 _UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]] class UpperCAmelCase : '''simple docstring''' def __init__( self ): """simple docstring""" A_ : str = WATERMARK_BITS A_ : Dict = WatermarkEncoder() self.encoder.set_watermark('bits' , self.watermark ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if images.shape[-1] < 2_5_6: return images A_ : str = (2_5_5 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy() A_ : Optional[Any] = [self.encoder.encode(lowercase , 'dwtDct' ) for image in images] A_ : Optional[int] = torch.from_numpy(np.array(lowercase ) ).permute(0 , 3 , 1 , 2 ) A_ : Optional[int] = torch.clamp(2 * (images / 2_5_5 - 0.5) , min=-1.0 , max=1.0 ) return images
720
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = FlaxAutoencoderKL @property def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = 4 A_ : int = 3 A_ : List[str] = (3_2, 3_2) A_ : Any = jax.random.PRNGKey(0 ) A_ : int = jax.random.uniform(lowercase , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } A_ : int = self.dummy_input return init_dict, inputs_dict
70
0
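# A minimal numpy sketch of the value-range round trip in the watermarking
# snippet above: images in [-1, 1] are scaled to [0, 255] for the encoder and
# mapped back afterwards. Shapes and dtypes here are illustrative assumptions.
import numpy as np

def to_uint8(images: np.ndarray) -> np.ndarray:
    # [-1, 1] float -> [0, 255] uint8, matching 255 * (x / 2 + 0.5) above.
    return np.clip(255 * (images / 2 + 0.5), 0, 255).astype(np.uint8)

def to_unit_range(images: np.ndarray) -> np.ndarray:
    # [0, 255] -> [-1, 1] float, matching 2 * (x / 255 - 0.5) above.
    return np.clip(2 * (images.astype(np.float32) / 255 - 0.5), -1.0, 1.0)

sample = np.random.uniform(-1, 1, size=(1, 3, 4, 4)).astype(np.float32)
assert to_unit_range(to_uint8(sample)).shape == sample.shape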
from math import isqrt, loga def UpperCamelCase ( __lowercase : int ): '''simple docstring''' A_ : Dict = [True] * max_number for i in range(2 ,isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 ,__lowercase ,__lowercase ): A_ : List[str] = False return [i for i in range(2 ,__lowercase ) if is_prime[i]] def UpperCamelCase ( __lowercase : int = 80_08_00 ,__lowercase : int = 80_08_00 ): '''simple docstring''' A_ : str = degree * loga(__lowercase ) A_ : Dict = int(__lowercase ) A_ : Dict = calculate_prime_numbers(__lowercase ) A_ : Any = 0 A_ : Optional[Any] = 0 A_ : List[Any] = len(__lowercase ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(F"""{solution() = }""")
721
import numpy as np _UpperCAmelCase = [ ["""a""", """b""", """c""", """d""", """e"""], ["""f""", """g""", """h""", """i""", """k"""], ["""l""", """m""", """n""", """o""", """p"""], ["""q""", """r""", """s""", """t""", """u"""], ["""v""", """w""", """x""", """y""", """z"""], ] class UpperCAmelCase : '''simple docstring''' def __init__( self ): """simple docstring""" A_ : Any = np.array(lowercase ) def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ , A_ : Optional[Any] = np.where(letter == self.SQUARE ) A_ : List[str] = np.concatenate([indexa + 1, indexa + 1] ) return indexes def lowerCAmelCase_ ( self , lowercase , lowercase ): """simple docstring""" A_ : int = self.SQUARE[indexa - 1, indexa - 1] return letter def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : int = message.lower() A_ : Tuple = message.replace(' ' , '' ) A_ : int = message.replace('j' , 'i' ) A_ : Any = np.empty((2, len(lowercase )) ) for letter_index in range(len(lowercase ) ): A_ : Optional[int] = self.letter_to_numbers(message[letter_index] ) A_ : Union[str, Any] = numbers[0] A_ : Union[str, Any] = numbers[1] A_ : Optional[int] = first_step.reshape(2 * len(lowercase ) ) A_ : int = '' for numbers_index in range(len(lowercase ) ): A_ : str = int(second_step[numbers_index * 2] ) A_ : str = int(second_step[(numbers_index * 2) + 1] ) A_ : Tuple = self.numbers_to_letter(lowercase , lowercase ) A_ : Tuple = encoded_message + letter return encoded_message def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" A_ : Optional[int] = message.lower() message.replace(' ' , '' ) A_ : Tuple = np.empty(2 * len(lowercase ) ) for letter_index in range(len(lowercase ) ): A_ : Optional[Any] = self.letter_to_numbers(message[letter_index] ) A_ : Optional[int] = numbers[0] A_ : Dict = numbers[1] A_ : Optional[int] = first_step.reshape((2, len(lowercase )) ) A_ : List[str] = '' for numbers_index in range(len(lowercase ) ): A_ : List[Any] = int(second_step[0, numbers_index] ) A_ : Optional[int] = int(second_step[1, numbers_index] ) A_ : Tuple = self.numbers_to_letter(lowercase , lowercase ) A_ : str = decoded_message + letter return decoded_message
70
0
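# A plain-Python restatement of the 5x5 Polybius square that the numpy class
# above implements ('j' is folded into 'i'). This is an illustrative sketch,
# not the original implementation.
from typing import Tuple

SQUARE = ["abcde", "fghik", "lmnop", "qrstu", "vwxyz"]

def letter_to_numbers(letter: str) -> Tuple[int, int]:
    for row, line in enumerate(SQUARE, start=1):
        if letter in line:
            return row, line.index(letter) + 1
    raise ValueError(f"cannot encode {letter!r}")

def numbers_to_letter(row: int, col: int) -> str:
    return SQUARE[row - 1][col - 1]

# Usage sketch: letter_to_numbers("m") == (3, 2) and numbers_to_letter(3, 2) == "m".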
'''simple docstring''' import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict _UpperCAmelCase = namedtuple( """_TestCommandArgs""", [ """dataset""", """name""", """cache_dir""", """data_dir""", """all_configs""", """save_infos""", """ignore_verifications""", """force_redownload""", """clear_cache""", ], defaults=[None, None, None, False, False, False, False, False], ) def UpperCamelCase ( __lowercase : List[str] ,__lowercase : List[Any] ): '''simple docstring''' return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def UpperCamelCase ( __lowercase : List[str] ): '''simple docstring''' A_ : Union[str, Any] = _TestCommandArgs(dataset=__lowercase ,all_configs=__lowercase ,save_infos=__lowercase ) A_ : Any = TestCommand(*__lowercase ) test_command.run() A_ : str = os.path.join(__lowercase ,'README.md' ) assert os.path.exists(__lowercase ) A_ : int = DatasetInfosDict.from_directory(__lowercase ) A_ : Optional[int] = DatasetInfosDict( { 'default': DatasetInfo( features=Features( { 'tokens': Sequence(Value('string' ) ), 'ner_tags': Sequence( ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ), 'langs': Sequence(Value('string' ) ), 'spans': Sequence(Value('string' ) ), } ) ,splits=[ { 'name': 'train', 'num_bytes': 2_35_15_63, 'num_examples': 1_00_00, }, { 'name': 'validation', 'num_bytes': 23_84_18, 'num_examples': 10_00, }, ] ,download_size=3_94_06_80 ,dataset_size=2_58_99_81 ,) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: A_ : Tuple = getattr(dataset_infos['default'] ,__lowercase ), getattr(expected_dataset_infos['default'] ,__lowercase ) if key == "num_bytes": assert is_apercent_close(__lowercase ,__lowercase ) elif key == "splits": assert list(__lowercase ) == list(__lowercase ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes ,expected[split].num_bytes ) else: assert result == expected
700
from math import sqrt def UpperCamelCase ( __lowercase : int = 1_00_00_00 ): '''simple docstring''' A_ : int = 0 A_ : int = 0 A_ : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(__lowercase ,sum_shortest_sides // 2 ) - max(1 ,sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(F"""{solution() = }""")
70
0
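# A short sketch of the geometric test behind the cuboid snippet above: after
# unfolding an a x b x c box, the shortest surface path has length
# sqrt(longest**2 + (sum of the two shorter sides)**2), and the snippet counts
# the sizes for which that length is an integer.
from math import sqrt

def shortest_path_is_integer(a: int, b: int, c: int) -> bool:
    sides = sorted((a, b, c))
    return sqrt(sides[2] ** 2 + (sides[0] + sides[1]) ** 2).is_integer()

# Usage sketch: shortest_path_is_integer(3, 5, 6) is True (the path length is 10).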
def UpperCamelCase ( __lowercase ,__lowercase ,__lowercase ): '''simple docstring''' def count_of_possible_combinations(__lowercase ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__lowercase ) def UpperCamelCase ( __lowercase ,__lowercase ,__lowercase ): '''simple docstring''' def count_of_possible_combinations_with_dp_array( __lowercase ,__lowercase ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] A_ : Optional[int] = sum( count_of_possible_combinations_with_dp_array(target - item ,__lowercase ) for item in array ) A_ : Any = answer return answer A_ : str = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__lowercase ,__lowercase ) def UpperCamelCase ( __lowercase ,__lowercase ,__lowercase ): '''simple docstring''' A_ : Optional[int] = [0] * (target + 1) A_ : Optional[Any] = 1 for i in range(1 ,target + 1 ): for j in range(__lowercase ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() _UpperCAmelCase = 3 _UpperCAmelCase = 5 _UpperCAmelCase = [1, 2, 5] print(combination_sum_iv(n, array, target))
701
import re from typing import Callable, List, Optional, Union import tensorflow as tf try: from tensorflow.keras.optimizers.legacy import Adam except ImportError: from tensorflow.keras.optimizers import Adam class UpperCAmelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase , lowercase = 1.0 , lowercase = None , ): """simple docstring""" super().__init__() A_ : Tuple = initial_learning_rate A_ : List[str] = warmup_steps A_ : int = power A_ : Dict = decay_schedule_fn A_ : Any = name def __call__( self , lowercase ): """simple docstring""" with tf.name_scope(self.name or 'WarmUp' ) as name: # Implements polynomial warmup. i.e., if global_step < warmup_steps, the # learning rate will be `global_step/num_warmup_steps * init_lr`. A_ : Optional[int] = tf.cast(lowercase , tf.floataa ) A_ : int = tf.cast(self.warmup_steps , tf.floataa ) A_ : Optional[int] = global_step_float / warmup_steps_float A_ : Optional[Any] = self.initial_learning_rate * tf.math.pow(lowercase , self.power ) return tf.cond( global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=lowercase , ) def lowerCAmelCase_ ( self ): """simple docstring""" return { "initial_learning_rate": self.initial_learning_rate, "decay_schedule_fn": self.decay_schedule_fn, "warmup_steps": self.warmup_steps, "power": self.power, "name": self.name, } def UpperCamelCase ( __lowercase : float ,__lowercase : int ,__lowercase : int ,__lowercase : float = 0.0 ,__lowercase : float = 0.9 ,__lowercase : float = 0.9_99 ,__lowercase : float = 1e-8 ,__lowercase : Optional[float] = None ,__lowercase : Optional[float] = None ,__lowercase : float = 0.0 ,__lowercase : float = 1.0 ,__lowercase : Optional[List[str]] = None ,): '''simple docstring''' A_ : List[str] = tf.keras.optimizers.schedules.PolynomialDecay( initial_learning_rate=__lowercase ,decay_steps=num_train_steps - num_warmup_steps ,end_learning_rate=init_lr * min_lr_ratio ,power=__lowercase ,) if num_warmup_steps: A_ : Tuple = WarmUp( initial_learning_rate=__lowercase ,decay_schedule_fn=__lowercase ,warmup_steps=__lowercase ,) if weight_decay_rate > 0.0: A_ : Union[str, Any] = AdamWeightDecay( learning_rate=__lowercase ,weight_decay_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] ,include_in_weight_decay=__lowercase ,) else: A_ : Dict = tf.keras.optimizers.Adam( learning_rate=__lowercase ,beta_a=__lowercase ,beta_a=__lowercase ,epsilon=__lowercase ,clipnorm=__lowercase ,global_clipnorm=__lowercase ,) # We return the optimizer and the LR scheduler in order to better track the # evolution of the LR independently of the optimizer. 
return optimizer, lr_schedule class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self , lowercase = 0.001 , lowercase = 0.9 , lowercase = 0.999 , lowercase = 1E-7 , lowercase = False , lowercase = 0.0 , lowercase = None , lowercase = None , lowercase = "AdamWeightDecay" , **lowercase , ): """simple docstring""" super().__init__(lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , **lowercase ) A_ : Dict = weight_decay_rate A_ : Union[str, Any] = include_in_weight_decay A_ : str = exclude_from_weight_decay @classmethod def lowerCAmelCase_ ( cls , lowercase ): """simple docstring""" A_ : Tuple = {'WarmUp': WarmUp} return super(lowercase , cls ).from_config(lowercase , custom_objects=lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" super(lowercase , self )._prepare_local(lowercase , lowercase , lowercase ) A_ : Optional[Any] = tf.constant( self.weight_decay_rate , name='adam_weight_decay_rate' ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" A_ : Dict = self._do_use_weight_decay(var.name ) if do_decay: return var.assign_sub( learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , ) return tf.no_op() def lowerCAmelCase_ ( self , lowercase , lowercase=None , **lowercase ): """simple docstring""" A_ , A_ : Optional[int] = list(zip(*lowercase ) ) return super(lowercase , self ).apply_gradients(zip(lowercase , lowercase ) , name=lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase ): """simple docstring""" if apply_state is None: return self._decayed_lr_t[var_dtype], {} A_ : List[str] = apply_state or {} A_ : Dict = apply_state.get((var_device, var_dtype) ) if coefficients is None: A_ : Dict = self._fallback_apply_state(lowercase , lowercase ) A_ : int = coefficients return coefficients["lr_t"], {"apply_state": apply_state} def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Union[str, Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_dense(lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None ): """simple docstring""" A_ , A_ : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , lowercase ) A_ : Optional[Any] = self._decay_weights_op(lowercase , lowercase , lowercase ) with tf.control_dependencies([decay] ): return super(lowercase , self )._resource_apply_sparse(lowercase , lowercase , lowercase , **lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[int] = super().get_config() config.update({'weight_decay_rate': self.weight_decay_rate} ) return config def lowerCAmelCase_ ( self , lowercase ): """simple docstring""" if self.weight_decay_rate == 0: return False if self._include_in_weight_decay: for r in self._include_in_weight_decay: if re.search(lowercase , lowercase ) is not None: return True if self._exclude_from_weight_decay: for r in self._exclude_from_weight_decay: if re.search(lowercase , lowercase ) is not None: return False return True class UpperCAmelCase ( __A ): '''simple docstring''' def __init__( self ): """simple docstring""" A_ : int = [] A_ : Optional[int] = None @property def lowerCAmelCase_ ( self ): """simple 
docstring""" if self._accum_steps is None: A_ : int = tf.Variable( tf.constant(0 , dtype=tf.intaa ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) return self._accum_steps.value() @property def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: raise ValueError('The accumulator should be called first to initialize the gradients' ) return [gradient.value() if gradient is not None else gradient for gradient in self._gradients] def __call__( self , lowercase ): """simple docstring""" if not self._gradients: A_ : Optional[Any] = self.step # Create the step variable. self._gradients.extend( [ tf.Variable( tf.zeros_like(lowercase ) , trainable=lowercase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , ) if gradient is not None else gradient for gradient in gradients ] ) if len(lowercase ) != len(self._gradients ): raise ValueError(F'''Expected {len(self._gradients )} gradients, but got {len(lowercase )}''' ) for accum_gradient, gradient in zip(self._gradients , lowercase ): if accum_gradient is not None and gradient is not None: accum_gradient.assign_add(lowercase ) self._accum_steps.assign_add(1 ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self._gradients: return self._accum_steps.assign(0 ) for gradient in self._gradients: if gradient is not None: gradient.assign(tf.zeros_like(lowercase ) )
70
0
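# A compact sketch of the bottom-up dynamic program in the combination-sum
# snippet above: dp[i] counts the ordered ways to reach the sum i from the
# given items.
from typing import List

def count_combinations(items: List[int], target: int) -> int:
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to make 0: pick nothing
    for total in range(1, target + 1):
        for item in items:
            if total - item >= 0:
                dp[total] += dp[total - item]
    return dp[target]

# Usage sketch: count_combinations([1, 2, 5], 5) == 9, matching the snippet's
# example inputs (target=5, array=[1, 2, 5]).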
import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) _UpperCAmelCase = """hf-internal-testing/tiny-random-bert""" _UpperCAmelCase = os.path.join(TRANSFORMERS_CACHE, """models--hf-internal-testing--tiny-random-bert""") _UpperCAmelCase = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6""" class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Union[str, Any] = cached_file(lowercase , lowercase ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(lowercase ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(lowercase , lowercase ) ) ) with open(os.path.join(lowercase , 'refs' , 'main' ) ) as f: A_ : Optional[Any] = f.read() self.assertEqual(lowercase , os.path.join(lowercase , 'snapshots' , lowercase , lowercase ) ) self.assertTrue(os.path.isfile(lowercase ) ) # File is cached at the same place the second time. A_ : Optional[Any] = cached_file(lowercase , lowercase ) self.assertEqual(lowercase , lowercase ) # Using a specific revision to test the full commit hash. A_ : List[str] = cached_file(lowercase , lowercase , revision='9b8c223' ) self.assertEqual(lowercase , os.path.join(lowercase , 'snapshots' , lowercase , lowercase ) ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaisesRegex(lowercase , 'is not a valid model identifier' ): A_ : Tuple = cached_file('tiny-random-bert' , lowercase ) with self.assertRaisesRegex(lowercase , 'is not a valid git identifier' ): A_ : int = cached_file(lowercase , lowercase , revision='aaaa' ) with self.assertRaisesRegex(lowercase , 'does not appear to have a file named' ): A_ : List[Any] = cached_file(lowercase , 'conf' ) def lowerCAmelCase_ ( self ): """simple docstring""" with self.assertRaisesRegex(lowercase , 'does not appear to have a file named' ): A_ : Optional[int] = cached_file(lowercase , 'conf' ) with open(os.path.join(lowercase , 'refs' , 'main' ) ) as f: A_ : Union[str, Any] = f.read() self.assertTrue(os.path.isfile(os.path.join(lowercase , '.no_exist' , lowercase , 'conf' ) ) ) A_ : str = cached_file(lowercase , 'conf' , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A_ : int = cached_file(lowercase , 'conf' , local_files_only=lowercase , _raise_exceptions_for_missing_entries=lowercase ) self.assertIsNone(lowercase ) A_ : Dict = mock.Mock() A_ : Optional[int] = 5_0_0 A_ : List[str] = {} A_ : Any = HTTPError A_ : Any = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('requests.Session.request' , return_value=lowercase ) as mock_head: A_ : str = cached_file(lowercase , 'conf' , _raise_exceptions_for_connection_errors=lowercase ) self.assertIsNone(lowercase ) # This check ensures we did call the fake head request mock_head.assert_called() def lowerCAmelCase_ ( self ): """simple docstring""" self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , lowercase ) ) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowercase ) ) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , lowercase ) ) def lowerCAmelCase_ ( self ): """simple docstring""" self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(lowercase , 'is not a valid model identifier' ): get_file_from_repo('bert-base-case' , lowercase ) # The function raises if the revision does not exist. with self.assertRaisesRegex(lowercase , 'is not a valid git identifier' ): get_file_from_repo('bert-base-cased' , lowercase , revision='ahaha' ) A_ : List[Any] = get_file_from_repo('bert-base-cased' , lowercase ) # The name is the cached name which is not very easy to test, so instead we load the content. A_ : List[str] = json.loads(open(lowercase , 'r' ).read() ) self.assertEqual(config['hidden_size'] , 7_6_8 ) def lowerCAmelCase_ ( self ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: A_ : List[str] = Path(lowercase ) / 'a.txt' filename.touch() self.assertEqual(get_file_from_repo(lowercase , 'a.txt' ) , str(lowercase ) ) self.assertIsNone(get_file_from_repo(lowercase , 'b.txt' ) )
702
from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : Any = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModel.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = AutoModel.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : int = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForPreTraining.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = AutoModelForPreTraining.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Dict = TFAutoModelForCausalLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Tuple = AutoModelForCausalLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForCausalLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Tuple = 
AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : int = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : str = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = TFAutoModelForMaskedLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : str = TFAutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForMaskedLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : Tuple = AutoModelForMaskedLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A_ : Dict = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase , from_pt=lowercase ) A_ , A_ : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase , from_tf=lowercase ) A_ , A_ : List[str] = AutoModelForSeqaSeqLM.from_pretrained( lowercase , output_loading_info=lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[str] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[Any] = TFAutoModelForSequenceClassification.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" for model_name in ["bert-base-uncased"]: A_ : List[Any] = AutoConfig.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) A_ : List[Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsNotNone(lowercase ) self.assertIsInstance(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Optional[Any] = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) 
self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = TFAutoModelWithLMHead.from_pretrained(lowercase , from_pt=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 ) A_ : Dict = AutoModelWithLMHead.from_pretrained(lowercase , from_tf=lowercase ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual(model.num_parameters() , 1_4_4_1_0 ) self.assertEqual(model.num_parameters(only_trainable=lowercase ) , 1_4_4_1_0 )
70
0
'''simple docstring''' import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def UpperCamelCase ( __lowercase : List[Any] ): '''simple docstring''' return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def UpperCamelCase ( __lowercase : List[str] ,__lowercase : Optional[Any] ): '''simple docstring''' A_ : Dict = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue A_ : Optional[Any] = key.replace('heads.cmd.mim_head.cls.predictions' ,'mmm_image_head' ) A_ : Optional[Any] = key.replace('heads.cmd.mlm_head.cls.predictions' ,'mmm_text_head' ) A_ : Tuple = key.replace('heads.cmd.itm_head.cls' ,'itm_head' ) A_ : List[str] = key.replace('heads.cmd.itm_head.pooler' ,'itm_head.pooler' ) A_ : Optional[int] = key.replace('heads.cmd.clip_head.logit_scale' ,'flava.logit_scale' ) A_ : Union[str, Any] = key.replace('heads.fairseq_mlm.cls.predictions' ,'mlm_head' ) A_ : List[Any] = key.replace('heads.imagenet.mim_head.cls.predictions' ,'mim_head' ) A_ : List[str] = key.replace('mm_text_projection' ,'flava.text_to_mm_projection' ) A_ : Tuple = key.replace('mm_image_projection' ,'flava.image_to_mm_projection' ) A_ : Optional[int] = key.replace('image_encoder.module' ,'flava.image_model' ) A_ : Union[str, Any] = key.replace('text_encoder.module' ,'flava.text_model' ) A_ : Dict = key.replace('mm_encoder.module.encoder.cls_token' ,'flava.multimodal_model.cls_token' ) A_ : int = key.replace('mm_encoder.module' ,'flava.multimodal_model' ) A_ : List[Any] = key.replace('text_projection' ,'flava.text_projection' ) A_ : Optional[int] = key.replace('image_projection' ,'flava.image_projection' ) A_ : List[str] = value.float() for key, value in codebook_state_dict.items(): A_ : int = value return upgrade @torch.no_grad() def UpperCamelCase ( __lowercase : int ,__lowercase : Tuple ,__lowercase : Tuple ,__lowercase : Tuple=None ): '''simple docstring''' if config_path is not None: A_ : List[str] = FlavaConfig.from_pretrained(__lowercase ) else: A_ : Dict = FlavaConfig() A_ : Union[str, Any] = FlavaForPreTraining(__lowercase ).eval() A_ : int = convert_dalle_checkpoint(__lowercase ,__lowercase ,save_checkpoint=__lowercase ) if os.path.exists(__lowercase ): A_ : str = torch.load(__lowercase ,map_location='cpu' ) else: A_ : Optional[int] = torch.hub.load_state_dict_from_url(__lowercase ,map_location='cpu' ) A_ : Any = upgrade_state_dict(__lowercase ,__lowercase ) hf_model.load_state_dict(__lowercase ) A_ : Optional[int] = hf_model.state_dict() A_ : List[str] = count_parameters(__lowercase ) A_ : int = count_parameters(__lowercase ) + count_parameters(__lowercase ) assert torch.allclose(__lowercase ,__lowercase ,atol=1e-3 ) hf_model.save_pretrained(__lowercase ) if __name__ == "__main__": _UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") _UpperCAmelCase = parser.parse_args() 
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
703
def UpperCamelCase ( __lowercase : str ): '''simple docstring''' A_ : int = len(__lowercase ) A_ : List[Any] = sum(__lowercase ) A_ : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 ,n + 1 ): A_ : Optional[Any] = True for i in range(1 ,s + 1 ): A_ : Tuple = False for i in range(1 ,n + 1 ): for j in range(1 ,s + 1 ): A_ : Dict = dp[i][j - 1] if arr[i - 1] <= j: A_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) ,-1 ,-1 ): if dp[n][j] is True: A_ : List[Any] = s - 2 * j break return diff
70
0
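# A minimal sketch of the partition problem the snippet above solves: the
# smallest possible difference between the sums of two subsets of the array.
# It uses a reachable-sums set instead of the snippet's boolean table, purely
# for brevity.
from typing import List

def min_partition_difference(arr: List[int]) -> int:
    total = sum(arr)
    reachable = {0}
    for value in arr:
        # Each value either joins the first subset or stays in the second.
        reachable |= {subtotal + value for subtotal in reachable}
    return min(total - 2 * subtotal for subtotal in reachable if 2 * subtotal <= total)

# Usage sketch: min_partition_difference([1, 6, 11, 5]) == 1 (12 versus 11).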
_UpperCAmelCase = { """A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""", """H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""", """O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""", """V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""", """2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""", """8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""", """:""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""", """?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""", """(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/""" } # Exclamation mark is not in ITU-R recommendation # fmt: on _UpperCAmelCase = {value: key for key, value in MORSE_CODE_DICT.items()} def UpperCamelCase ( __lowercase : str ): '''simple docstring''' return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def UpperCamelCase ( __lowercase : str ): '''simple docstring''' return "".join(REVERSE_DICT[char] for char in message.split() ) def UpperCamelCase ( ): '''simple docstring''' A_ : Optional[Any] = 'Morse code here!' print(__lowercase ) A_ : List[str] = encrypt(__lowercase ) print(__lowercase ) A_ : Tuple = decrypt(__lowercase ) print(__lowercase ) if __name__ == "__main__": main()
704
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) A_ : List[Any] = get_activation('gelu' ) self.assertTrue(torch.allclose(gelu_python(lowercase ) , torch_builtin(lowercase ) ) ) self.assertFalse(torch.allclose(gelu_python(lowercase ) , gelu_new(lowercase ) ) ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Tuple = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) A_ : str = get_activation('gelu' ) A_ : int = get_activation('gelu_10' ) A_ : Optional[int] = torch_builtin(lowercase ) A_ : Tuple = geluaa(lowercase ) A_ : Dict = torch.where(y_gelu_aa < 10.0 , 1 , 0 ) self.assertTrue(torch.max(lowercase ).item() == 10.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def lowerCAmelCase_ ( self ): """simple docstring""" get_activation('gelu' ) get_activation('gelu_10' ) get_activation('gelu_fast' ) get_activation('gelu_new' ) get_activation('gelu_python' ) get_activation('gelu_pytorch_tanh' ) get_activation('linear' ) get_activation('mish' ) get_activation('quick_gelu' ) get_activation('relu' ) get_activation('sigmoid' ) get_activation('silu' ) get_activation('swish' ) get_activation('tanh' ) with self.assertRaises(lowercase ): get_activation('bogus' ) with self.assertRaises(lowercase ): get_activation(lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = get_activation('gelu' ) A_ : List[str] = 1 A_ : Optional[Any] = get_activation('gelu' ) self.assertEqual(acta.a , 1 ) with self.assertRaises(lowercase ): A_ : str = acta.a
70
0
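# A tiny round-trip check for the Morse helpers above, using a reduced table;
# the full MORSE_CODE_DICT in the snippet also covers digits and punctuation.
MORSE = {"S": "...", "O": "---", " ": "/"}
REVERSE = {value: key for key, value in MORSE.items()}

def morse_encrypt(message: str) -> str:
    return " ".join(MORSE[char] for char in message.upper())

def morse_decrypt(code: str) -> str:
    return "".join(REVERSE[token] for token in code.split())

assert morse_decrypt(morse_encrypt("SOS SOS")) == "SOS SOS"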