Dataset schema:
- code: string (length 81 to 54k)
- code_codestyle: int64 (0 to 721)
- style_context: string (length 91 to 41.9k)
- style_context_codestyle: int64 (0 to 699)
- label: int64 (0 to 1)
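The rows below are easier to inspect programmatically than by eye. A minimal sketch follows, assuming the rows are hosted as a Hugging Face Hub dataset with the schema above; the repository id "user/code-style-dataset" is a hypothetical placeholder, not the actual id.

# A minimal sketch, assuming the rows below come from a Hugging Face Hub dataset
# with the schema above; "user/code-style-dataset" is a hypothetical placeholder,
# not the actual repository id.
from datasets import load_dataset

ds = load_dataset("user/code-style-dataset", split="train")  # hypothetical repo id

row = ds[0]
# Each row pairs a code sample with a style context, their style ids, and a label.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code sample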
Row 1
code:
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1_024 , lowerCAmelCase_=1_024 , lowerCAmelCase_=False , **lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Optional[Any] = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
    _snake_case : List[Any] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='''train''' , **lowerCAmelCase_ )
    _snake_case : Dict = tok.pad_token_id

    def get_lens(lowerCAmelCase_ ):
        _snake_case : int = tqdm(
            DataLoader(lowerCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=lowerCAmelCase_ , collate_fn=ds.collate_fn ) ,
            desc=str(ds.len_file ) ,
        )
        _snake_case : Union[str, Any] = []
        for batch in dl:
            _snake_case : str = batch['''input_ids'''].ne(lowerCAmelCase_ ).sum(1 ).tolist()
            _snake_case : int = batch['''labels'''].ne(lowerCAmelCase_ ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
                    max_lens.append(max(lowerCAmelCase_ , lowerCAmelCase_ ) )
            else:
                max_lens.extend(lowerCAmelCase_ )
        return max_lens

    _snake_case : Optional[Any] = get_lens(lowerCAmelCase_ )
    _snake_case : Optional[int] = SeqaSeqDataset(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , type_path='''val''' , **lowerCAmelCase_ )
    _snake_case : Union[str, Any] = get_lens(lowerCAmelCase_ )
    pickle_save(lowerCAmelCase_ , train_ds.len_file )
    pickle_save(lowerCAmelCase_ , val_ds.len_file )


if __name__ == "__main__":
    fire.Fire(save_len_file)
code_codestyle: 47
style_context:
'''simple docstring'''
def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        return 0
    elif n == 2:
        return 1
    else:
        _snake_case : Union[str, Any] = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Optional[int] = 0
    _snake_case : int = 2
    while digits < n:
        index += 1
        _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) )
    return index


def _a ( lowerCAmelCase_ = 1_000 ):
    """simple docstring"""
    return fibonacci_digits_index(lowerCAmelCase_ )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
style_context_codestyle: 47
label: 1
Row 2
code:
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    return EnvironmentCommand()


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    return EnvironmentCommand(args.accelerate_config_file )


class lowerCamelCase (a__ ):
    @staticmethod
    def UpperCAmelCase_ ( lowercase__ ) -> Tuple:
        """simple docstring"""
        _snake_case : Any = parser.add_parser('''env''' )
        download_parser.set_defaults(func=lowercase__ )
        download_parser.add_argument(
            '''--accelerate-config_file''' ,
            default=lowercase__ ,
            help='''The accelerate config file to use for the default values in the launching script.''' ,
        )
        download_parser.set_defaults(func=lowercase__ )

    def __init__( self , lowercase__ , *lowercase__ ) -> None:
        """simple docstring"""
        _snake_case : List[Any] = accelerate_config_file

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """simple docstring"""
        _snake_case : Any = '''not installed'''
        if is_safetensors_available():
            import safetensors

            _snake_case : Any = safetensors.__version__
        elif importlib.util.find_spec('''safetensors''' ) is not None:
            import safetensors

            _snake_case : str = F'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''

        _snake_case : Tuple = '''not installed'''
        _snake_case : List[Any] = '''not found'''
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            _snake_case : Any = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(lowercase__ ):
                _snake_case : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict()

            _snake_case : Optional[int] = (
                '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
                if isinstance(lowercase__ , lowercase__ )
                else F'''\t{accelerate_config}'''
            )

        _snake_case : Tuple = '''not installed'''
        _snake_case : List[Any] = '''NA'''
        if is_torch_available():
            import torch

            _snake_case : Tuple = torch.__version__
            _snake_case : Any = torch.cuda.is_available()

        _snake_case : Optional[int] = '''not installed'''
        _snake_case : Tuple = '''NA'''
        if is_tf_available():
            import tensorflow as tf

            _snake_case : Optional[Any] = tf.__version__
            try:
                # deprecated in v2.1
                _snake_case : List[Any] = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                _snake_case : List[str] = bool(tf.config.list_physical_devices('''GPU''' ) )

        _snake_case : Union[str, Any] = '''not installed'''
        _snake_case : Tuple = '''not installed'''
        _snake_case : Union[str, Any] = '''not installed'''
        _snake_case : Optional[Any] = '''NA'''
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            _snake_case : Optional[Any] = flax.__version__
            _snake_case : int = jax.__version__
            _snake_case : Union[str, Any] = jaxlib.__version__
            _snake_case : Tuple = jax.lib.xla_bridge.get_backend().platform

        _snake_case : List[str] = {
            '''`transformers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''Huggingface_hub version''': huggingface_hub.__version__,
            '''Safetensors version''': F'''{safetensors_version}''',
            '''Accelerate version''': F'''{accelerate_version}''',
            '''Accelerate config''': F'''{accelerate_config_str}''',
            '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
            '''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
            '''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
            '''Jax version''': F'''{jax_version}''',
            '''JaxLib version''': F'''{jaxlib_version}''',
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }

        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(lowercase__ ) )

        return info

    @staticmethod
    def UpperCAmelCase_ ( lowercase__ ) -> Optional[int]:
        """simple docstring"""
        return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
code_codestyle: 47
style_context:
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')


class lowerCamelCase (Generic[T, U] ):
    def __init__( self , lowercase__ , lowercase__ ) -> List[Any]:
        """simple docstring"""
        _snake_case : str = key
        _snake_case : Optional[int] = val
        _snake_case : DoubleLinkedListNode[T, U] | None = None
        _snake_case : DoubleLinkedListNode[T, U] | None = None

    def __repr__( self ) -> str:
        """simple docstring"""
        return (
            F'''Node: key: {self.key}, val: {self.val}, '''
            F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )


class lowerCamelCase (Generic[T, U] ):
    def __init__( self ) -> None:
        """simple docstring"""
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        _snake_case , _snake_case : Union[str, Any] = self.rear, self.head

    def __repr__( self ) -> str:
        """simple docstring"""
        _snake_case : List[Any] = ['''DoubleLinkedList''']
        _snake_case : str = self.head
        while node.next is not None:
            rep.append(str(lowercase__ ) )
            _snake_case : List[str] = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> None:
        """simple docstring"""
        _snake_case : Tuple = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        _snake_case : Union[str, Any] = node
        _snake_case : Optional[Any] = previous
        _snake_case : int = node
        _snake_case : Union[str, Any] = self.rear

    def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None:
        """simple docstring"""
        if node.prev is None or node.next is None:
            return None

        _snake_case : Optional[int] = node.next
        _snake_case : Any = node.prev
        _snake_case : List[str] = None
        _snake_case : Optional[int] = None
        return node


class lowerCamelCase (Generic[T, U] ):
    _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__( self , lowercase__ ) -> Union[str, Any]:
        """simple docstring"""
        _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
        _snake_case : Union[str, Any] = capacity
        _snake_case : int = 0
        _snake_case : Dict = 0
        _snake_case : Union[str, Any] = 0
        _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__( self ) -> str:
        """simple docstring"""
        return (
            F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            F'''capacity={self.capacity}, current size={self.num_keys})'''
        )

    def __contains__( self , lowercase__ ) -> bool:
        """simple docstring"""
        return key in self.cache

    def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
        """simple docstring"""
        if key in self.cache:
            self.hits += 1
            _snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
            _snake_case : Tuple = self.list.remove(self.cache[key] )
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(lowercase__ )
            return node.val
        self.miss += 1
        return None

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
        """simple docstring"""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                _snake_case : Dict = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(lowercase__ ) is not None
                )  # node guaranteed to be in list

                assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1

            _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            _snake_case : Optional[Any] = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            _snake_case : Optional[Any] = value
            self.list.add(lowercase__ )

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """simple docstring"""

        def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
            def cache_decorator_wrapper(*lowercase__ ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    _snake_case : Optional[Any] = LRUCache(lowercase__ )

                _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    _snake_case : Tuple = func(*lowercase__ )
                    cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(lowercase__ , '''cache_info''' , lowercase__ )  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 47
label: 1
Row 3
code:
'''simple docstring'''
import argparse
import json

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : List[str] = 384
    if "tiny" in model_name:
        _snake_case : Optional[int] = [3, 3, 9, 3]
        _snake_case : Any = [96, 192, 384, 768]
    if "small" in model_name:
        _snake_case : Tuple = [3, 3, 27, 3]
        _snake_case : Optional[int] = [96, 192, 384, 768]
    if "base" in model_name:
        _snake_case : str = [3, 3, 27, 3]
        _snake_case : Any = [128, 256, 512, 1_024]
        _snake_case : Optional[Any] = 512
    if "large" in model_name:
        _snake_case : Optional[int] = [3, 3, 27, 3]
        _snake_case : List[str] = [192, 384, 768, 1_536]
        _snake_case : Tuple = 768
    if "xlarge" in model_name:
        _snake_case : Optional[int] = [3, 3, 27, 3]
        _snake_case : Union[str, Any] = [256, 512, 1_024, 2_048]
        _snake_case : Optional[int] = 1_024

    # set label information
    _snake_case : Union[str, Any] = 150
    _snake_case : Dict = '''huggingface/label-files'''
    _snake_case : Optional[int] = '''ade20k-id2label.json'''
    _snake_case : int = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
    _snake_case : List[str] = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
    _snake_case : Union[str, Any] = {v: k for k, v in idalabel.items()}

    _snake_case : List[str] = ConvNextConfig(
        depths=lowerCAmelCase_ , hidden_sizes=lowerCAmelCase_ , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4''']
    )
    _snake_case : str = UperNetConfig(
        backbone_config=lowerCAmelCase_ ,
        auxiliary_in_channels=lowerCAmelCase_ ,
        num_labels=lowerCAmelCase_ ,
        idalabel=lowerCAmelCase_ ,
        labelaid=lowerCAmelCase_ ,
    )

    return config


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Optional[Any] = []

    # fmt: off
    # stem
    rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
    rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
    rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
    rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
    # stages
    for i in range(len(config.backbone_config.depths ) ):
        for j in range(config.backbone_config.depths[i] ):
            rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
            rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
        if i > 0:
            rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
            rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
            rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
            rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )

        rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
        rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )

    # decode head
    rename_keys.extend(
        [
            ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
            ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
            ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
            ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
        ]
    )
    # fmt: on

    return rename_keys


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Any = dct.pop(lowerCAmelCase_ )
    _snake_case : int = val


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Tuple = {
        '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
        '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
        '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
        '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
        '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
    }
    _snake_case : Optional[Any] = model_name_to_url[model_name]
    _snake_case : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='''cpu''' )['''state_dict''']

    _snake_case : Dict = get_upernet_config(lowerCAmelCase_ )
    _snake_case : List[str] = UperNetForSemanticSegmentation(lowerCAmelCase_ )
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        _snake_case : Optional[int] = state_dict.pop(lowerCAmelCase_ )
        if "bn" in key:
            _snake_case : List[Any] = key.replace('''bn''' , '''batch_norm''' )
        _snake_case : Any = val

    # rename keys
    _snake_case : Optional[int] = create_rename_keys(lowerCAmelCase_ )
    for src, dest in rename_keys:
        rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    model.load_state_dict(lowerCAmelCase_ )

    # verify on image
    _snake_case : Optional[int] = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    _snake_case : Dict = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert('''RGB''' )

    _snake_case : Any = SegformerImageProcessor()
    _snake_case : int = processor(lowerCAmelCase_ , return_tensors='''pt''' ).pixel_values

    with torch.no_grad():
        _snake_case : Tuple = model(lowerCAmelCase_ )

    if model_name == "upernet-convnext-tiny":
        _snake_case : List[Any] = torch.tensor(
            [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] )
    elif model_name == "upernet-convnext-small":
        _snake_case : Dict = torch.tensor(
            [[-8.8_236, -8.8_236, -8.6_771], [-8.8_236, -8.8_236, -8.6_771], [-8.7_638, -8.7_638, -8.6_240]] )
    elif model_name == "upernet-convnext-base":
        _snake_case : str = torch.tensor(
            [[-8.8_558, -8.8_558, -8.6_905], [-8.8_558, -8.8_558, -8.6_905], [-8.7_669, -8.7_669, -8.6_021]] )
    elif model_name == "upernet-convnext-large":
        _snake_case : Union[str, Any] = torch.tensor(
            [[-8.6_660, -8.6_660, -8.6_210], [-8.6_660, -8.6_660, -8.6_210], [-8.6_310, -8.6_310, -8.5_964]] )
    elif model_name == "upernet-convnext-xlarge":
        _snake_case : List[Any] = torch.tensor(
            [[-8.4_980, -8.4_980, -8.3_977], [-8.4_980, -8.4_980, -8.3_977], [-8.4_379, -8.4_379, -8.3_412]] )
    print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCAmelCase_ , atol=1E-4 )
    print('''Looks ok!''' )

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(lowerCAmelCase_ )
        print(f'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(lowerCAmelCase_ )

    if push_to_hub:
        print(f'''Pushing model and processor for {model_name} to hub''' )
        model.push_to_hub(f'''openmmlab/{model_name}''' )
        processor.push_to_hub(f'''openmmlab/{model_name}''' )


if __name__ == "__main__":
    UpperCAmelCase : int = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='upernet-convnext-tiny',
        type=str,
        choices=[F"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
        help='Name of the ConvNext UperNet model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    UpperCAmelCase : int = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
code_codestyle: 47
style_context:
'''simple docstring'''
import os

import numpy
import onnx


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : List[Any] = a.name
    _snake_case : List[Any] = b.name
    _snake_case : Tuple = ''''''
    _snake_case : Tuple = ''''''
    _snake_case : Optional[Any] = a == b
    _snake_case : List[Any] = name_a
    _snake_case : str = name_b
    return res


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ )
            node_proto.input.pop(i + 1 )

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
        _graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Optional[Any] = list(model.graph.initializer )
    _snake_case : List[str] = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        _snake_case : List[Any] = inits[i].name
        _snake_case : List[str] = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ )


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Tuple = os.path.dirname(lowerCAmelCase_ )
    _snake_case : str = os.path.basename(lowerCAmelCase_ )

    _snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )

    _snake_case : Union[str, Any] = list(model.graph.initializer )

    _snake_case : Union[str, Any] = set()
    _snake_case : Any = {}
    _snake_case : str = []
    _snake_case : Union[str, Any] = 0

    for i in range(len(lowerCAmelCase_ ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(lowerCAmelCase_ ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(lowerCAmelCase_ )
                dup_set.add(lowerCAmelCase_ )

                _snake_case : List[Any] = inits[j].data_type
                _snake_case : Dict = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''' , lowerCAmelCase_ )
                total_reduced_size += mem_size

                _snake_case : Union[str, Any] = inits[i].name
                _snake_case : Any = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(lowerCAmelCase_ )
                else:
                    _snake_case : Union[str, Any] = [name_j]
                ind_to_replace.append((j, i) )

    print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )

    _snake_case : List[str] = sorted(lowerCAmelCase_ )
    _remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    _snake_case : List[str] = '''optimized_''' + model_file_name
    _snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
    onnx.save(lowerCAmelCase_ , lowerCAmelCase_ )

    return new_model
style_context_codestyle: 47
label: 1
Row 4
code:
'''simple docstring'''
import argparse

import numpy as np
import torch

from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging

logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger('transformers.models.speecht5')


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    hf_model.apply_weight_norm()

    _snake_case : int = checkpoint['''input_conv.weight_g''']
    _snake_case : str = checkpoint['''input_conv.weight_v''']
    _snake_case : List[Any] = checkpoint['''input_conv.bias''']

    for i in range(len(config.upsample_rates ) ):
        _snake_case : int = checkpoint[f'''upsamples.{i}.1.weight_g''']
        _snake_case : List[Any] = checkpoint[f'''upsamples.{i}.1.weight_v''']
        _snake_case : List[Any] = checkpoint[f'''upsamples.{i}.1.bias''']

    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            _snake_case : str = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
            _snake_case : Optional[Any] = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
            _snake_case : Tuple = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
            _snake_case : Tuple = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
            _snake_case : Union[str, Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
            _snake_case : Tuple = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']

    _snake_case : str = checkpoint['''output_conv.1.weight_g''']
    _snake_case : str = checkpoint['''output_conv.1.weight_v''']
    _snake_case : Any = checkpoint['''output_conv.1.bias''']

    hf_model.remove_weight_norm()


@torch.no_grad()
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
    """simple docstring"""
    if config_path is not None:
        _snake_case : Any = SpeechTaHifiGanConfig.from_pretrained(lowerCAmelCase_ )
    else:
        _snake_case : Dict = SpeechTaHifiGanConfig()

    _snake_case : str = SpeechTaHifiGan(lowerCAmelCase_ )

    _snake_case : Optional[Any] = torch.load(lowerCAmelCase_ )
    load_weights(orig_checkpoint['''model''']['''generator'''] , lowerCAmelCase_ , lowerCAmelCase_ )

    _snake_case : Tuple = np.load(lowerCAmelCase_ )
    _snake_case : List[Any] = stats[0].reshape(-1 )
    _snake_case : Tuple = stats[1].reshape(-1 )
    _snake_case : Any = torch.from_numpy(lowerCAmelCase_ ).float()
    _snake_case : Dict = torch.from_numpy(lowerCAmelCase_ ).float()

    model.save_pretrained(lowerCAmelCase_ )

    if repo_id:
        print('''Pushing to the hub...''' )
        model.push_to_hub(lowerCAmelCase_ )


if __name__ == "__main__":
    UpperCAmelCase : int = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )

    UpperCAmelCase : Optional[int] = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
code_codestyle: 47
style_context:
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

UpperCAmelCase : int = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : Union[str, Any] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 47
label: 1
Row 5
code:
'''simple docstring'''
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging

UpperCAmelCase : Tuple = logging.get_logger(__name__)

UpperCAmelCase : Dict = {
    'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class lowerCamelCase (a__ ):
    _lowercase : Optional[int] = """umt5"""
    _lowercase : Optional[Any] = ["""past_key_values"""]

    def __init__( self , lowercase__=250_112 , lowercase__=512 , lowercase__=64 , lowercase__=1_024 , lowercase__=8 , lowercase__=None , lowercase__=6 , lowercase__=32 , lowercase__=128 , lowercase__=0.1 , lowercase__=1E-6 , lowercase__=1.0 , lowercase__="gated-gelu" , lowercase__=True , lowercase__=True , lowercase__="T5Tokenizer" , lowercase__=True , lowercase__=0 , lowercase__=1 , lowercase__=0 , **lowercase__ , ) -> Dict:
        """simple docstring"""
        super().__init__(
            is_encoder_decoder=lowercase__ ,
            tokenizer_class=lowercase__ ,
            tie_word_embeddings=lowercase__ ,
            pad_token_id=lowercase__ ,
            eos_token_id=lowercase__ ,
            decoder_start_token_id=lowercase__ ,
            **lowercase__ ,
        )
        _snake_case : str = vocab_size
        _snake_case : Any = d_model
        _snake_case : Union[str, Any] = d_kv
        _snake_case : List[str] = d_ff
        _snake_case : Dict = num_layers
        _snake_case : Tuple = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        _snake_case : List[Any] = num_heads
        _snake_case : Dict = relative_attention_num_buckets
        _snake_case : List[Any] = relative_attention_max_distance
        _snake_case : int = dropout_rate
        _snake_case : List[Any] = layer_norm_epsilon
        _snake_case : Tuple = initializer_factor
        _snake_case : List[str] = feed_forward_proj
        _snake_case : Optional[Any] = use_cache

        _snake_case : Optional[Any] = self.feed_forward_proj.split('''-''' )
        _snake_case : str = act_info[-1]
        _snake_case : int = act_info[0] == '''gated'''

        if len(lowercase__ ) > 1 and act_info[0] != "gated" or len(lowercase__ ) > 2:
            raise ValueError(
                F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\''''
            )

        if feed_forward_proj == "gated-gelu":
            _snake_case : Optional[int] = '''gelu_new'''

    @property
    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """simple docstring"""
        return self.d_model

    @property
    def UpperCAmelCase_ ( self ) -> Any:
        """simple docstring"""
        return self.num_heads

    @property
    def UpperCAmelCase_ ( self ) -> Dict:
        """simple docstring"""
        return self.num_layers


class lowerCamelCase (a__ ):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def UpperCAmelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        _snake_case : Any = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            _snake_case : int = '''past_encoder_sequence + sequence'''
            _snake_case : Dict = {0: '''batch'''}
            _snake_case : List[str] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            _snake_case : str = {0: '''batch''', 1: '''decoder_sequence'''}
            _snake_case : str = {0: '''batch''', 1: '''decoder_sequence'''}

        if self.use_past:
            self.fill_with_past_key_values_(lowercase__ , direction='''inputs''' )

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def UpperCAmelCase_ ( self ) -> int:
        """simple docstring"""
        return 13

    @property
    def UpperCAmelCase_ ( self ) -> float:
        """simple docstring"""
        return 5E-4
code_codestyle: 47
style_context:
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

UpperCAmelCase : Dict = logging.get_logger(__name__)


class lowerCamelCase (a__ ):
    _lowercase : int = ["""pixel_values"""]

    def __init__( self , lowercase__ = True , lowercase__ = 32 , lowercase__=PILImageResampling.BILINEAR , lowercase__ = True , **lowercase__ , ) -> None:
        """simple docstring"""
        _snake_case : Any = do_resize
        _snake_case : List[str] = do_rescale
        _snake_case : Any = size_divisor
        _snake_case : Optional[Any] = resample
        super().__init__(**lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
        """simple docstring"""
        _snake_case , _snake_case : Dict = get_image_size(lowercase__ )
        # Rounds the height and width down to the closest multiple of size_divisor
        _snake_case : Optional[int] = height // size_divisor * size_divisor
        _snake_case : Dict = width // size_divisor * size_divisor
        _snake_case : str = resize(lowercase__ , (new_h, new_w) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
        return image

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> BatchFeature:
        """simple docstring"""
        _snake_case : Any = do_resize if do_resize is not None else self.do_resize
        _snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        _snake_case : List[str] = size_divisor if size_divisor is not None else self.size_divisor
        _snake_case : int = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )

        _snake_case : Tuple = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError('''Invalid image(s)''' )

        # All transformations expect numpy arrays.
        _snake_case : Tuple = [to_numpy_array(lowercase__ ) for img in images]

        if do_resize:
            _snake_case : Optional[int] = [self.resize(lowercase__ , size_divisor=lowercase__ , resample=lowercase__ ) for image in images]

        if do_rescale:
            _snake_case : Union[str, Any] = [self.rescale(lowercase__ , scale=1 / 255 ) for image in images]

        _snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]

        _snake_case : List[str] = {'''pixel_values''': images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
style_context_codestyle: 47
label: 1
Row 6
code:
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging

if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {'vocab_file': 'spiece.model'}

UpperCAmelCase : str = {
    'vocab_file': {
        'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
        'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
    }
}

UpperCAmelCase : Any = {
    'AI-Sweden/gpt-sw3-126m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-350m': 2_0_4_8,
    'AI-Sweden/gpt-sw3-1.6b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-6.7b': 2_0_4_8,
    'AI-Sweden/gpt-sw3-20b': 2_0_4_8,
}


class lowerCamelCase (a__ ):
    _lowercase : Any = VOCAB_FILES_NAMES
    _lowercase : str = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : List[Any] = ["""input_ids""", """attention_mask"""]

    def __init__( self , lowercase__ , lowercase__=False , lowercase__=False , lowercase__=False , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__ = None , **lowercase__ , ) -> None:
        """simple docstring"""
        _snake_case : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs

        _snake_case : List[str] = kwargs.get('''name_or_path''' )
        if name_or_path is None:
            logger.warning(
                '''name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,'''
                ''' you are testing the model, this can safely be ignored'''
            )
            _snake_case : List[str] = '''None'''

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        _snake_case : Optional[int] = '''<|endoftext|>''' if eos_token is None else eos_token
        _snake_case : Tuple = '''<unk>''' if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            _snake_case : str = unk_token if pad_token is None else pad_token
            _snake_case : List[Any] = eos_token if bos_token is None else bos_token
        else:
            _snake_case : int = '''<pad>''' if pad_token is None else pad_token
            _snake_case : List[str] = '''<s>''' if bos_token is None else bos_token

        super().__init__(
            do_lower_case=lowercase__ ,
            remove_space=lowercase__ ,
            keep_accents=lowercase__ ,
            bos_token=lowercase__ ,
            eos_token=lowercase__ ,
            unk_token=lowercase__ ,
            pad_token=lowercase__ ,
            sp_model_kwargs=self.sp_model_kwargs ,
            **lowercase__ ,
        )

        _snake_case : Union[str, Any] = do_lower_case
        _snake_case : Optional[Any] = remove_space
        _snake_case : int = keep_accents
        _snake_case : Optional[Any] = vocab_file

        _snake_case : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowercase__ )

        # Used for whitespace normalization in input texts
        # fmt : off
        _snake_case : str = {''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', ''' ''', '''''', '''„'''}
        # fmt : on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        _snake_case : Optional[Any] = re.compile(
            F'''[{"".join(map(lowercase__ , list(range(0 , 9 ) ) + list(range(11 , 32 ) ) + list(range(127 , 160 ) ) + [160, 173, 8_203] ) )}]''' )

    def __getstate__( self ) -> int:
        """simple docstring"""
        _snake_case : int = self.__dict__.copy()
        _snake_case : Tuple = None
        return state

    def __setstate__( self , lowercase__ ) -> Optional[Any]:
        """simple docstring"""
        _snake_case : Optional[int] = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            _snake_case : int = {}

        _snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def UpperCAmelCase_ ( self ) -> int:
        """simple docstring"""
        return len(self.sp_model )

    def UpperCAmelCase_ ( self , lowercase__ ) -> str:
        """simple docstring"""
        _snake_case : Optional[int] = self.non_printing_characters_re.sub('''''' , lowercase__ )

        # Normalize whitespaces
        _snake_case : str = ''''''.join([char if char not in self.whitespaces else ''' ''' for char in text] )

        # NFC Unicode normalization
        _snake_case : str = unicodedata.normalize('''NFC''' , lowercase__ )
        return text

    def UpperCAmelCase_ ( self , lowercase__ , **lowercase__ ) -> List[str]:
        """simple docstring"""
        _snake_case : List[Any] = self.preprocess_text(lowercase__ )
        return self.sp_model.encode(lowercase__ , out_type=lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> int:
        """simple docstring"""
        return self.sp_model.PieceToId(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> str:
        """simple docstring"""
        return self.sp_model.IdToPiece(lowercase__ )

    @staticmethod
    def UpperCAmelCase_ ( lowercase__ ) -> str:
        """simple docstring"""
        return out_string

    def UpperCAmelCase_ ( self , lowercase__ ) -> str:
        """simple docstring"""
        _snake_case : List[str] = []
        _snake_case : Any = ''''''
        _snake_case : Union[str, Any] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(lowercase__ ) + token
                _snake_case : str = True
                _snake_case : Union[str, Any] = []
            else:
                current_sub_tokens.append(lowercase__ )
                _snake_case : int = False

        out_string += self.sp_model.decode(lowercase__ )

        return out_string

    def UpperCAmelCase_ ( self ) -> Dict[str, int]:
        """simple docstring"""
        _snake_case : Any = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(lowercase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _snake_case : Union[str, Any] = os.path.join(
            lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowercase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowercase__ , '''wb''' ) as fi:
                _snake_case : Optional[Any] = self.sp_model.serialized_model_proto()
                fi.write(lowercase__ )

        return (out_vocab_file,)

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = False ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """simple docstring"""
        if isinstance(lowercase__ , lowercase__ ):
            _snake_case : Optional[int] = self.preprocess_text(lowercase__ )
            _snake_case : Union[str, Any] = self.sp_model.encode(lowercase__ )
        else:
            _snake_case : Optional[int] = [self.preprocess_text(lowercase__ ) for t in text]
            _snake_case : Union[str, Any] = self.sp_model.encode(lowercase__ )

        if return_tensors is True or return_tensors == "pt":
            _snake_case : Union[str, Any] = torch.tensor(lowercase__ )

        return token_ids

    def UpperCAmelCase_ ( self , lowercase__ ) -> str:
        """simple docstring"""
        return self.sp_model.decode(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
        """simple docstring"""
        _snake_case : Optional[Any] = [F'''User: {text}''' if is_user else F'''Bot: {text}''' for is_user, text in conversation.iter_texts()]
        _snake_case : Union[str, Any] = (
            F'''{self.eos_token}{self.bos_token}''' + F'''{self.bos_token}'''.join(lowercase__ ) + F'''{self.bos_token}Bot:'''
        )
        return self.encode(text=lowercase__ )
code_codestyle: 47
style_context:
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class lowerCamelCase :
    _lowercase : Any = LEDConfig
    _lowercase : Any = {}
    _lowercase : Optional[Any] = """gelu"""

    def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
        """simple docstring"""
        _snake_case : Dict = parent
        _snake_case : Any = batch_size
        _snake_case : List[str] = seq_length
        _snake_case : Union[str, Any] = is_training
        _snake_case : Tuple = use_labels
        _snake_case : int = vocab_size
        _snake_case : str = hidden_size
        _snake_case : Optional[Any] = num_hidden_layers
        _snake_case : List[Any] = num_attention_heads
        _snake_case : Optional[int] = intermediate_size
        _snake_case : List[Any] = hidden_dropout_prob
        _snake_case : List[str] = attention_probs_dropout_prob
        _snake_case : Optional[int] = max_position_embeddings
        _snake_case : Any = eos_token_id
        _snake_case : List[Any] = pad_token_id
        _snake_case : Optional[int] = bos_token_id
        _snake_case : Any = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        _snake_case : Any = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        _snake_case : Tuple = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """simple docstring"""
        _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )

        _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _snake_case : List[Any] = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            attention_window=self.attention_window ,
            **self.config_updates ,
        )
        _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
        _snake_case : Dict = tf.concat(
            [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] ,
            axis=-1 ,
        )
        _snake_case : Dict = global_attention_mask
        return config, inputs_dict

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
        """simple docstring"""
        _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
        _snake_case : Union[str, Any] = inputs_dict['''input_ids''']

        _snake_case : List[str] = input_ids[:1, :]
        _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
        _snake_case : Dict = 1

        # first forward pass
        _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )

        _snake_case , _snake_case : Dict = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
        _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
        _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
        _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
    """simple docstring"""
    if attention_mask is None:
        _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        _snake_case : str = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        _snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
    _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : Dict = (
        {
            """conversational""": TFLEDForConditionalGeneration,
            """feature-extraction""": TFLEDModel,
            """summarization""": TFLEDForConditionalGeneration,
            """text2text-generation""": TFLEDForConditionalGeneration,
            """translation""": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : int = True
    _lowercase : List[Any] = False
    _lowercase : str = False
    _lowercase : Union[str, Any] = False

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """simple docstring"""
        _snake_case : str = TFLEDModelTester(self )
        _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )

    def UpperCAmelCase_ ( self ) -> Tuple:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )

    def UpperCAmelCase_ ( self ) -> int:
        """simple docstring"""
        _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
        _snake_case : Optional[Any] = 2
        _snake_case : Any = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,
            1 ,
            inputs_dict['''global_attention_mask'''] ,
        )
        _snake_case : Dict = True
        _snake_case : str = self.model_tester.seq_length
        _snake_case : Dict = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(lowercase__ ):
            _snake_case : Optional[int] = outputs.decoder_attentions
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, seq_length, seq_length] ,
            )

        def check_encoder_attentions_output(lowercase__ ):
            _snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
            _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, seq_length, seq_length] ,
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,
            )

        for model_class in self.all_model_classes:
            _snake_case : Union[str, Any] = True
            _snake_case : Dict = False
            _snake_case : Union[str, Any] = False
            _snake_case : List[Any] = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            _snake_case : List[Any] = len(lowercase__ )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

            if self.is_encoder_decoder:
                _snake_case : Union[str, Any] = model_class(lowercase__ )
                _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
                self.assertEqual(config.output_hidden_states , lowercase__ )
                check_decoder_attentions_output(lowercase__ )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _snake_case : str = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

            # Check attention is always last and order is fine
            _snake_case : int = True
            _snake_case : List[str] = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )

            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
            self.assertEqual(model.config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def UpperCAmelCase_ ( self ) -> int:
        """simple docstring"""
        pass

    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        pass


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    return tf.constant(lowerCAmelCase_ , dtype=tf.intaa )


UpperCAmelCase : Dict = 1E-4


@slow
@require_tf
class lowerCamelCase (unittest.TestCase ):
    def UpperCAmelCase_ ( self ) -> Dict:
        """simple docstring"""
        _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led

        # change to intended input here
        _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
        _snake_case : int = model(**lowercase__ )[0]
        _snake_case : Dict = (1, 1_024, 768)
        self.assertEqual(output.shape , lowercase__ )
        # change to expected output here
        _snake_case : List[Any] = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] ,
        )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """simple docstring"""
        _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )

        # change to intended input here
        _snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
        _snake_case : Tuple = model(**lowercase__ )[0]
        _snake_case : Any = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , lowercase__ )
        # change to expected output here
        _snake_case : Dict = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] ,
        )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
style_context_codestyle: 47
label: 1
'''simple docstring''' # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path UpperCAmelCase : Dict = Path(__file__).resolve().parents[3] / 'src' sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) UpperCAmelCase : Optional[int] = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'} UpperCAmelCase : Union[str, Any] = 'zero2' UpperCAmelCase : Union[str, Any] = 'zero3' UpperCAmelCase : str = [ZEROa, ZEROa] def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Any = parameterized.to_safe_name('''_'''.join(str(lowerCAmelCase_ ) for x in param.args ) ) return f'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test UpperCAmelCase : List[str] = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class lowerCamelCase (a__ ): @parameterized.expand(lowercase__ , name_func=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Optional[Any]: """simple docstring""" self.run_and_check( stage=lowercase__ , model=lowercase__ , distributed=lowercase__ , fpaa=lowercase__ , ) @require_torch_multi_gpu @parameterized.expand(lowercase__ , name_func=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" self.run_and_check( stage=lowercase__ , model=lowercase__ , distributed=lowercase__ , fpaa=lowercase__ , ) @parameterized.expand(lowercase__ , name_func=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[Any]: """simple docstring""" self.run_and_check( stage=lowercase__ , model=lowercase__ , distributed=lowercase__ , fpaa=lowercase__ , ) @require_torch_multi_gpu @parameterized.expand(lowercase__ , name_func=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" self.run_and_check( stage=lowercase__ , model=lowercase__ , distributed=lowercase__ , fpaa=lowercase__ , ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[str]: """simple docstring""" pass def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = 10 , lowercase__ = True , lowercase__ = True , lowercase__ = True , ) -> Optional[Any]: """simple docstring""" _snake_case : Dict = models[model] _snake_case : int = self.run_trainer( stage=lowercase__ , model_name=lowercase__ , eval_steps=lowercase__ , num_train_epochs=1 , distributed=lowercase__ , fpaa=lowercase__ , ) self.do_checks(lowercase__ ) return output_dir def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = 10 , lowercase__ = 1 , lowercase__ = True , lowercase__ = True , ) -> int: """simple docstring""" _snake_case : Any = self.get_auto_remove_tmp_dir('''./xxx''' , after=lowercase__ ) 
_snake_case : str = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(lowercase__ )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} '''.split() if fpaa: args.extend(['''--fp16'''] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _snake_case : Dict = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _snake_case : Any = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _snake_case : Tuple = self.get_launcher(lowercase__ ) _snake_case : List[Any] = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(lowercase__ , env=self.get_env() ) return output_dir def UpperCAmelCase_ ( self , lowercase__=False ) -> Optional[Any]: """simple docstring""" _snake_case : Tuple = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
47
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Any = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } UpperCAmelCase : Optional[Any] = { 'gpt-neox-20b': 2_0_4_8, } class lowerCamelCase (a__ ): _lowercase : Optional[int] = VOCAB_FILES_NAMES _lowercase : str = PRETRAINED_VOCAB_FILES_MAP _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]: """simple docstring""" super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , ) _snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space: _snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) ) _snake_case : int = add_prefix_space _snake_case : Optional[Any] = pre_tok_class(**lowercase__ ) _snake_case : List[str] = add_prefix_space def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" _snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]: """simple docstring""" _snake_case : List[str] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] ) if len(lowercase__ ) > self.model_max_length: _snake_case : Dict = input_ids[-self.model_max_length :] return input_ids
47
1
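The GPT-NeoX tokenizer above left-truncates the accumulated conversation ids so that only the most recent model_max_length tokens survive. A toy restatement of that truncation rule (the helper name is mine, not the library's):

def truncate_left(input_ids, model_max_length):
    # keep only the newest tokens, dropping the oldest turns first
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids

assert truncate_left(list(range(10)), 4) == [6, 7, 8, 9]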
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase : List[Any] = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = '▁' UpperCAmelCase : Dict = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'} UpperCAmelCase : Optional[Any] = { 'sentencepiece_model_file': 'sentencepiece.bpe.model', 'vocab_file': 'vocab.txt', } UpperCAmelCase : Any = { 'vocab_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt', }, 'sentencepiece_model_file': { 'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', 'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model', }, } UpperCAmelCase : Dict = { 'ernie-m-base': 5_1_4, 'ernie-m-large': 5_1_4, } UpperCAmelCase : List[str] = { 'ernie-m-base': {'do_lower_case': False}, 'ernie-m-large': {'do_lower_case': False}, } class lowerCamelCase (a__ ): _lowercase : List[str] = ["input_ids"] _lowercase : Any = VOCAB_FILES_NAMES _lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP _lowercase : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self , lowercase__ , lowercase__=None , lowercase__=False , lowercase__="utf8" , lowercase__="[UNK]" , lowercase__="[SEP]" , lowercase__="[PAD]" , lowercase__="[CLS]" , lowercase__="[MASK]" , lowercase__ = None , **lowercase__ , ) -> None: """simple docstring""" _snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , vocab_file=lowercase__ , encoding=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , ) _snake_case : Union[str, Any] = do_lower_case _snake_case : List[Any] = sentencepiece_model_ckpt _snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase__ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: _snake_case : Union[str, Any] = self.load_vocab(filepath=lowercase__ ) else: _snake_case : int = {self.sp_model.id_to_piece(lowercase__ ): id for id in range(self.sp_model.get_piece_size() )} _snake_case : Any = {v: k for k, v in self.vocab.items()} def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[Any]: """simple docstring""" if text is None: return None _snake_case : List[str] = self.tokenize(lowercase__ ) _snake_case , _snake_case : Union[str, Any] = '''''', [] for i, ch in enumerate(lowercase__ ): if ch in self.SP_CHAR_MAPPING: _snake_case : Optional[Any] = self.SP_CHAR_MAPPING.get(lowercase__ ) else: _snake_case : Tuple = unicodedata.normalize('''NFKC''' , lowercase__ ) if self.is_whitespace(lowercase__ ): continue normalized_text += ch char_mapping.extend([i] * len(lowercase__ ) ) _snake_case , _snake_case , _snake_case : Dict = normalized_text, [], 0 if self.do_lower_case: _snake_case : Optional[int] = text.lower() for token in split_tokens: if token[:1] == "▁": _snake_case : List[Any] = token[1:] _snake_case : int = text[offset:].index(lowercase__ ) + 
offset _snake_case : Optional[Any] = start + len(lowercase__ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) _snake_case : str = end return token_mapping @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" return len(self.vocab ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = self.__dict__.copy() _snake_case : Optional[Any] = None return state def __setstate__( self , lowercase__ ) -> Tuple: """simple docstring""" _snake_case : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _snake_case : str = {} _snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCAmelCase_ ( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(lowercase__ , lowercase__ ) for c in text) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__=False , lowercase__=64 , lowercase__=0.1 ) -> Any: """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: _snake_case : int = True if self.sp_model_kwargs.get('''alpha''' ) is not None: _snake_case : Dict = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: _snake_case : Union[str, Any] = self.sp_model_kwargs.get('''nbest_size''' ) if not enable_sampling: _snake_case : str = self.sp_model.EncodeAsPieces(lowercase__ ) else: _snake_case : Optional[int] = self.sp_model.SampleEncodeAsPieces(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Tuple = [] for pi, piece in enumerate(lowercase__ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(lowercase__ ) and pi != 0: new_pieces.append(lowercase__ ) continue else: continue _snake_case : List[Any] = 0 for i, chunk in enumerate(lowercase__ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(lowercase__ ) or self.is_punct(lowercase__ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(lowercase__ ) _snake_case : List[str] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _snake_case : Optional[int] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) _snake_case : Any = i if len(lowercase__ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCAmelCase_ ( self , lowercase__ ) -> int: """simple docstring""" _snake_case : Union[str, Any] = ''''''.join(lowercase__ ).replace(lowercase__ , ''' ''' ).strip() return out_string def UpperCAmelCase_ ( self , lowercase__ ) -> str: """simple docstring""" _snake_case : List[Any] = self.convert_ids_to_tokens(lowercase__ ) _snake_case : Dict = ''''''.join(lowercase__ ).replace(lowercase__ , ''' ''' ).strip() return out_string def UpperCAmelCase_ ( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" return self.vocab.get(lowercase__ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[Any]: """simple docstring""" return self.reverse_vocab.get(lowercase__ , self.unk_token ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__=None ) -> str: """simple docstring""" if token_ids_a is 
None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case : List[str] = [self.cls_token_id] _snake_case : Tuple = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCAmelCase_ ( self , lowercase__ , lowercase__=None ) -> Union[str, Any]: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCAmelCase_ ( self , lowercase__ , lowercase__=None , lowercase__=False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) + [1] return [1] + ([0] * len(lowercase__ )) + [1] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: # [CLS] X [SEP] return (len(lowercase__ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(lowercase__ ) + 1) + [1] * (len(lowercase__ ) + 3) def UpperCAmelCase_ ( self , lowercase__ ) -> Dict: """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCAmelCase_ ( self , lowercase__ ) -> str: """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(lowercase__ ) == 1: _snake_case : Union[str, Any] = unicodedata.category(lowercase__ ) if cat == "Zs": return True return False def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" _snake_case : Union[str, Any] = {} with io.open(lowercase__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(lowercase__ ): _snake_case : Optional[int] = line.rstrip('''\n''' ) _snake_case : List[Any] = int(lowercase__ ) return token_to_idx def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" _snake_case : Dict = 0 if os.path.isdir(lowercase__ ): _snake_case : Tuple = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: _snake_case : Union[str, Any] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda lowercase__ : lowercase__[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' ''' Please check that the vocabulary is not corrupted!''' ) _snake_case : Optional[int] = token_index writer.write(token + '''\n''' ) index += 1 _snake_case : Dict = os.path.join(lowercase__ , '''sentencepiece.bpe.model''' ) with open(lowercase__ , '''wb''' ) as fi: _snake_case : Dict = self.sp_model.serialized_model_proto() fi.write(lowercase__ ) return (vocab_file,)
47
'''simple docstring''' import math from numpy import inf from scipy.integrate import quad def _a ( lowerCAmelCase_ ): """simple docstring""" if lowerCAmelCase_ <= 0: raise ValueError('''math domain error''' ) return quad(lowerCAmelCase_ , 0 , inf , args=(lowerCAmelCase_) )[0] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return math.pow(lowerCAmelCase_ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
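The quad-based helper above computes the gamma function as an improper integral. A de-obfuscated sketch of the same idea, assuming the intended integrand is x**(z - 1) * exp(-x) over [0, inf):

import math
from numpy import inf
from scipy.integrate import quad

def gamma(num: float) -> float:
    # Gamma(num) = integral from 0 to inf of x**(num - 1) * exp(-x) dx
    if num <= 0:
        raise ValueError("math domain error")
    return quad(lambda x: math.pow(x, num - 1) * math.exp(-x), 0, inf)[0]

assert abs(gamma(5.0) - 24.0) < 1e-6  # Gamma(5) = 4! = 24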
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : Any = torch.nn.Linear(10 , 10 ) _snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) _snake_case : List[str] = Accelerator() _snake_case : Optional[Any] = accelerator.prepare(lowercase__ ) try: pickle.loads(pickle.dumps(lowercase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained( 
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Any = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : str = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) 
self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
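The accelerate test earlier in this row asserts that a prepared optimizer survives a pickle round trip. The bare round-trip check looks like this (plain SGD here, without the Accelerator.prepare wrapping used in the actual test):

import pickle
import torch

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
restored = pickle.loads(pickle.dumps(optimizer))  # raises if anything in the object graph is unpicklable
assert isinstance(restored, torch.optim.SGD)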
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin UpperCAmelCase : List[str] = False @skip_mps class lowerCamelCase (a__ , a__ , a__ , unittest.TestCase ): _lowercase : Any = StableDiffusionAttendAndExcitePipeline _lowercase : List[str] = False _lowercase : Union[str, Any] = TEXT_TO_IMAGE_PARAMS _lowercase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} ) _lowercase : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS _lowercase : str = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def UpperCAmelCase_ ( cls ) -> Dict: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(lowercase__ ) @classmethod def UpperCAmelCase_ ( cls ) -> Optional[int]: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(lowercase__ ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" torch.manual_seed(0 ) _snake_case : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowercase__ , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowercase__ , set_alpha_to_one=lowercase__ , ) torch.manual_seed(0 ) _snake_case : Tuple = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _snake_case : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , ) _snake_case : Optional[int] = CLIPTextModel(lowercase__ ) _snake_case : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _snake_case : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def UpperCAmelCase_ ( self , lowercase__ , lowercase__=0 ) -> Any: """simple docstring""" if str(lowercase__ ).startswith('''mps''' ): _snake_case : int = torch.manual_seed(lowercase__ ) else: _snake_case : List[str] = torch.Generator(device=lowercase__ ).manual_seed(lowercase__ ) _snake_case : Union[str, Any] = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def UpperCAmelCase_ ( self 
) -> Tuple: """simple docstring""" _snake_case : str = '''cpu''' _snake_case : Dict = self.get_dummy_components() _snake_case : Any = self.pipeline_class(**lowercase__ ) pipe.to(lowercase__ ) pipe.set_progress_bar_config(disable=lowercase__ ) _snake_case : Dict = self.get_dummy_inputs(lowercase__ ) _snake_case : List[str] = pipe(**lowercase__ ).images _snake_case : Optional[int] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) _snake_case : Dict = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) _snake_case : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowercase__ , 1E-3 ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" super().test_save_load_local(expected_max_difference=5E-4 ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class lowerCamelCase (unittest.TestCase ): @classmethod def UpperCAmelCase_ ( cls ) -> Tuple: """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(lowercase__ ) @classmethod def UpperCAmelCase_ ( cls ) -> int: """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(lowercase__ ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : str = torch.manual_seed(51 ) _snake_case : str = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=lowercase__ , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _snake_case : List[Any] = '''a painting of an elephant with glasses''' _snake_case : Any = [5, 7] _snake_case : Tuple = pipe( prompt=lowercase__ , token_indices=lowercase__ , guidance_scale=7.5 , generator=lowercase__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _snake_case : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
47
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
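The Attend-and-Excite tests above build their inputs with a seeded torch.Generator so that runs are reproducible. A small demonstration of why that works:

import torch

generator = torch.Generator(device="cpu").manual_seed(0)
sample_a = torch.randn(2, 3, generator=generator)
generator = torch.Generator(device="cpu").manual_seed(0)
sample_b = torch.randn(2, 3, generator=generator)
assert torch.equal(sample_a, sample_b)  # identical seeds yield identical draws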
'''simple docstring''' import warnings from contextlib import contextmanager from ....processing_utils import ProcessorMixin class lowerCamelCase (a__ ): _lowercase : Optional[int] = """MCTCTFeatureExtractor""" _lowercase : str = """AutoTokenizer""" def __init__( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" super().__init__(lowercase__ , lowercase__ ) _snake_case : List[str] = self.feature_extractor _snake_case : Any = False def __call__( self , *lowercase__ , **lowercase__ ) -> Optional[Any]: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*lowercase__ , **lowercase__ ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' ) _snake_case : List[str] = kwargs.pop('''raw_speech''' ) else: _snake_case : str = kwargs.pop('''audio''' , lowercase__ ) _snake_case : Dict = kwargs.pop('''sampling_rate''' , lowercase__ ) _snake_case : Optional[int] = kwargs.pop('''text''' , lowercase__ ) if len(lowercase__ ) > 0: _snake_case : Any = args[0] _snake_case : List[str] = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _snake_case : Tuple = self.feature_extractor(lowercase__ , *lowercase__ , sampling_rate=lowercase__ , **lowercase__ ) if text is not None: _snake_case : str = self.tokenizer(lowercase__ , **lowercase__ ) if text is None: return inputs elif audio is None: return encodings else: _snake_case : Dict = encodings['''input_ids'''] return inputs def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" return self.tokenizer.batch_decode(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> Optional[int]: """simple docstring""" if self._in_target_context_manager: return self.current_processor.pad(*lowercase__ , **lowercase__ ) _snake_case : int = kwargs.pop('''input_features''' , lowercase__ ) _snake_case : Tuple = kwargs.pop('''labels''' , lowercase__ ) if len(lowercase__ ) > 0: _snake_case : Union[str, Any] = args[0] _snake_case : List[Any] = args[1:] if input_features is not None: _snake_case : Union[str, Any] = self.feature_extractor.pad(lowercase__ , *lowercase__ , **lowercase__ ) if labels is not None: _snake_case : Union[str, Any] = self.tokenizer.pad(lowercase__ , **lowercase__ ) if labels is None: return input_features elif input_features is None: return labels else: _snake_case : List[Any] = labels['''input_ids'''] return input_features def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" return self.tokenizer.decode(*lowercase__ , **lowercase__ ) @contextmanager def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) _snake_case : List[str] = True _snake_case : Union[str, Any] = self.tokenizer yield _snake_case : Optional[int] = self.feature_extractor _snake_case : Union[str, Any] = False
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , 
from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check these named parameters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict =
self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfully executed!''' ) return trainer
47
1
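The lightning module above splits parameters into a weight-decay group and a no-decay group keyed on substrings of the parameter names. A self-contained sketch of that grouping, using torch.optim.AdamW in place of the transformers AdamW imported in the file:

import torch

class Tiny(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(4, 4)
        self.LayerNorm = torch.nn.LayerNorm(4)

model = Tiny()
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    # everything except biases and LayerNorm weights gets decayed
    {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5)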
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available UpperCAmelCase : List[Any] = {'tokenization_herbert': ['HerbertTokenizer']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = ['HerbertTokenizerFast'] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment 
_snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
47
1
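The SEW-D configuration above exposes the product of the convolutional strides, the number of raw audio samples consumed per output frame. With the default strides this works out as follows (the name inputs_to_logits_ratio follows the analogous upstream configs and is an assumption here):

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)  # default strides from the config above
inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
assert inputs_to_logits_ratio == 320  # one logit frame per 320 input samples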
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED UpperCAmelCase : Tuple = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } UpperCAmelCase : Any = { 'allenai/led-base-16384': 1_6_3_8_4, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def _a ( ): """simple docstring""" _snake_case : Dict = ( list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) ) ) _snake_case : int = bs[:] _snake_case : Optional[Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCAmelCase_ ) cs.append(2**8 + n ) n += 1 _snake_case : int = [chr(lowerCAmelCase_ ) for n in cs] return dict(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = set() _snake_case : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _snake_case : Tuple = char return pairs class lowerCamelCase (a__ ): _lowercase : Optional[Any] = VOCAB_FILES_NAMES _lowercase : int = PRETRAINED_VOCAB_FILES_MAP _lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase__ , lowercase__ , lowercase__="replace" , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=False , **lowercase__ , ) -> List[str]: """simple docstring""" _snake_case : Tuple = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else bos_token _snake_case : Dict = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else eos_token _snake_case : Union[str, Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else sep_token _snake_case : Optional[int] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else cls_token _snake_case : Tuple = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else unk_token _snake_case : Any = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _snake_case : str = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token super().__init__( errors=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , ) with open(lowercase__ , encoding='''utf-8''' ) as vocab_handle: _snake_case : List[str] = json.load(lowercase__ ) _snake_case : Any = {v: k for k, v in self.encoder.items()} _snake_case : Dict = errors # how to handle errors in decoding _snake_case : Optional[int] = bytes_to_unicode() _snake_case : Tuple = {v: k for k, v in self.byte_encoder.items()} with open(lowercase__ , encoding='''utf-8''' ) as merges_handle: _snake_case : Tuple = merges_handle.read().split('''\n''' )[1:-1] _snake_case : Tuple = [tuple(merge.split() ) for merge in bpe_merges] _snake_case : int = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) _snake_case : List[Any] = {} _snake_case : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _snake_case : Optional[int] = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return len(self.encoder ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[Any]: """simple docstring""" if token in self.cache: return self.cache[token] _snake_case : Optional[int] = tuple(lowercase__ ) _snake_case : List[str] = get_pairs(lowercase__ ) if not pairs: return token while True: _snake_case : Dict = min(lowercase__ , key=lambda lowercase__ : self.bpe_ranks.get(lowercase__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break _snake_case , _snake_case : Any = bigram _snake_case : Union[str, Any] = [] _snake_case : Tuple = 0 while i < len(lowercase__ ): try: _snake_case : Union[str, Any] = word.index(lowercase__ , lowercase__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _snake_case : Union[str, Any] = j if word[i] == first and i < len(lowercase__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _snake_case : str = tuple(lowercase__ ) _snake_case : Any = new_word if len(lowercase__ ) == 1: break else: _snake_case : str = get_pairs(lowercase__ ) _snake_case : Union[str, Any] = ''' '''.join(lowercase__ ) _snake_case : Optional[int] = word return word def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" _snake_case : List[str] = [] for token in re.findall(self.pat , lowercase__ ): _snake_case : Optional[int] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowercase__ ).split(''' ''' ) ) return bpe_tokens def UpperCAmelCase_ ( self , lowercase__ ) -> List[str]: """simple docstring""" return self.encoder.get(lowercase__ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.decoder.get(lowercase__ ) 
def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[Any]: """simple docstring""" _snake_case : Any = ''''''.join(lowercase__ ) _snake_case : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowercase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _snake_case : Tuple = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case : List[str] = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowercase__ , ensure_ascii=lowercase__ ) + '''\n''' ) _snake_case : Dict = 0 with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowercase__ : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) _snake_case : Optional[Any] = token_index writer.write(''' '''.join(lowercase__ ) + '''\n''' ) index += 1 return vocab_file, merge_file def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _snake_case : str = [self.cls_token_id] _snake_case : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ ) if token_ids_a is None: return [1] + ([0] * len(lowercase__ )) + [1] return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) + [1] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> List[int]: """simple docstring""" _snake_case : Optional[Any] = [self.sep_token_id] _snake_case : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self , lowercase__ , lowercase__=False , **lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : Union[str, Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(lowercase__ ) > 0 and not text[0].isspace()): _snake_case : Dict = ''' ''' + text return (text, kwargs) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = PaddingStrategy.DO_NOT_PAD , lowercase__ = None , lowercase__ = None , ) -> dict: """simple docstring""" _snake_case : int = super()._pad( encoded_inputs=lowercase__ , max_length=lowercase__ , padding_strategy=lowercase__ , pad_to_multiple_of=lowercase__ , return_attention_mask=lowercase__ , ) # Load from model defaults if return_attention_mask is None: _snake_case : Optional[Any] = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: _snake_case : 
Optional[Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. _snake_case : Dict = len(encoded_inputs['''global_attention_mask'''] ) != len(lowercase__ ) if needs_to_be_padded: _snake_case : Any = len(lowercase__ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` _snake_case : Any = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": _snake_case : Optional[Any] = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
47
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 100 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' ' is :' ) print(z)
47
1
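The tokenizer row above implements byte-level BPE: get_pairs collects adjacent symbol pairs and the merge loop repeatedly fuses the lowest-ranked pair. A minimal sketch of that loop with a toy merge table (the ranks dict below is hypothetical; real ranks come from merges.txt):

# Minimal BPE merge loop in the spirit of the tokenizer above.
def get_pairs(word):
    # Set of adjacent symbol pairs in a tuple of symbols.
    return {(a, b) for a, b in zip(word, word[1:])}

def bpe(word, bpe_ranks):
    word = tuple(word)
    while len(word) > 1:
        pairs = get_pairs(word)
        # Pick the pair with the lowest merge rank; stop if none is mergeable.
        bigram = min(pairs, key=lambda p: bpe_ranks.get(p, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and word[i] == first and word[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)

ranks = {("l", "o"): 0, ("lo", "w"): 1}  # toy merges, not a real vocabulary
print(bpe("low", ranks))  # "low"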
'''simple docstring''' import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : List[Any] = [ ['attention', 'attn'], ['encoder_attention', 'encoder_attn'], ['q_lin', 'q_proj'], ['k_lin', 'k_proj'], ['v_lin', 'v_proj'], ['out_lin', 'out_proj'], ['norm_embeddings', 'layernorm_embedding'], ['position_embeddings', 'embed_positions'], ['embeddings', 'embed_tokens'], ['ffn.lin', 'fc'], ] def _a ( lowerCAmelCase_ ): """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _snake_case : int = k.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if k.startswith('''encoder''' ): _snake_case : Optional[int] = k.replace('''.attn''' , '''.self_attn''' ) _snake_case : str = k.replace('''norm1''' , '''self_attn_layer_norm''' ) _snake_case : Any = k.replace('''norm2''' , '''final_layer_norm''' ) elif k.startswith('''decoder''' ): _snake_case : Optional[int] = k.replace('''norm1''' , '''self_attn_layer_norm''' ) _snake_case : Any = k.replace('''norm2''' , '''encoder_attn_layer_norm''' ) _snake_case : Union[str, Any] = k.replace('''norm3''' , '''final_layer_norm''' ) return k def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: _snake_case : Optional[int] = sd.pop(lowerCAmelCase_ ) _snake_case : Tuple = k.replace('''layernorm_embedding''' , '''layer_norm''' ) assert new_k not in sd _snake_case : Any = v UpperCAmelCase : Any = ['START'] @torch.no_grad() def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : int = torch.load(lowerCAmelCase_ , map_location='''cpu''' ) _snake_case : Tuple = model['''model'''] _snake_case : Dict = BlenderbotConfig.from_json_file(lowerCAmelCase_ ) _snake_case : Union[str, Any] = BlenderbotForConditionalGeneration(lowerCAmelCase_ ) _snake_case : Optional[int] = m.model.state_dict().keys() _snake_case : Optional[Any] = [] _snake_case : Dict = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _snake_case : Optional[int] = rename_state_dict_key(lowerCAmelCase_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _snake_case : str = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowerCAmelCase_ ) m.model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) m.half() m.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": UpperCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin') parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.') parser.add_argument( '--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use' ) UpperCAmelCase : str = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
47
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
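The conversion script above renames ParlAI checkpoint keys to the Hugging Face layout by running every key through a substring substitution table. A minimal sketch of that mechanism (the two patterns below are a hypothetical subset of the script's PATTERNS list):

# Substring-based state-dict key renaming, as in the conversion script above.
PATTERNS = [
    ("attention", "attn"),
    ("norm_embeddings", "layernorm_embedding"),
]

def rename_state_dict_key(k):
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    return k

old_sd = {"encoder.attention.weight": 0, "norm_embeddings.bias": 1}
new_sd = {rename_state_dict_key(k): v for k, v in old_sd.items()}
print(sorted(new_sd))
# ['encoder.attn.weight', 'layernorm_embedding.bias']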
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : int = current_set.copy() for row_index, row in enumerate(lowerCAmelCase_ ): _snake_case : List[Any] = row[0] for column_index, column in enumerate(lowerCAmelCase_ ): if magnitude == 0: _snake_case : Dict = column continue _snake_case : Tuple = column / magnitude # Subtract to cancel term _snake_case : Any = current_set[0] _snake_case : Optional[Any] = [first_row] _snake_case : List[Any] = current_set[1::] for row in current_set: _snake_case : Optional[Any] = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(lowerCAmelCase_ ) continue for column_index in range(len(lowerCAmelCase_ ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(lowerCAmelCase_ ) # Create next recursion iteration set if len(final_set[0] ) != 3: _snake_case : int = final_set[0] _snake_case : List[Any] = [] _snake_case : str = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _snake_case : int = simplify(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , lowerCAmelCase_ ) _snake_case : Union[str, Any] = resultant return final_set def _a ( lowerCAmelCase_ ): """simple docstring""" if len(lowerCAmelCase_ ) == 0: raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) _snake_case : int = len(lowerCAmelCase_ ) + 1 if any(len(lowerCAmelCase_ ) != _length for item in equations ): raise IndexError('''solve_simultaneous() requires n lists of length n+1''' ) for row in equations: if any(not isinstance(lowerCAmelCase_ , (int, float) ) for column in row ): raise ValueError('''solve_simultaneous() requires lists of integers''' ) if len(lowerCAmelCase_ ) == 1: return [equations[0][-1] / equations[0][0]] _snake_case : Tuple = equations.copy() if any(0 in row for row in data_set ): _snake_case : List[str] = data_set.copy() _snake_case : str = [] for row_index, row in enumerate(lowerCAmelCase_ ): if 0 not in row: _snake_case : Optional[int] = data_set.pop(lowerCAmelCase_ ) break if not full_row: raise ValueError('''solve_simultaneous() requires at least 1 full equation''' ) data_set.insert(0 , lowerCAmelCase_ ) _snake_case : List[str] = data_set.copy() _snake_case : Any = simplify(lowerCAmelCase_ ) _snake_case : str = simplified[::-1] _snake_case : list = [] for row in simplified: _snake_case : List[Any] = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _snake_case : int = row.copy()[: len(lowerCAmelCase_ ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(lowerCAmelCase_ ) == 0: solutions.append(0 ) continue _snake_case : str = temp_row[1::] _snake_case : Any = temp_row[::-1] for column_index, column in enumerate(lowerCAmelCase_ ): current_solution -= column * solutions[column_index] solutions.append(lowerCAmelCase_ ) _snake_case : Dict = [] for item in solutions: final.append(float(round(lowerCAmelCase_ , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : str = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
47
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def _a ( ): """simple docstring""" _snake_case : List[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' ) _snake_case : List[str] = parser.add_subparsers(help='''transformers-cli command helpers''' ) # Register commands ConvertCommand.register_subcommand(lowerCAmelCase_ ) DownloadCommand.register_subcommand(lowerCAmelCase_ ) EnvironmentCommand.register_subcommand(lowerCAmelCase_ ) RunCommand.register_subcommand(lowerCAmelCase_ ) ServeCommand.register_subcommand(lowerCAmelCase_ ) UserCommands.register_subcommand(lowerCAmelCase_ ) AddNewModelCommand.register_subcommand(lowerCAmelCase_ ) AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ ) LfsCommands.register_subcommand(lowerCAmelCase_ ) PTtoTFCommand.register_subcommand(lowerCAmelCase_ ) # Let's go _snake_case : str = parser.parse_args() if not hasattr(lowerCAmelCase_ , '''func''' ): parser.print_help() exit(1 ) # Run _snake_case : Union[str, Any] = args.func(lowerCAmelCase_ ) service.run() if __name__ == "__main__": main()
47
1
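The solver row above performs Gaussian elimination on an augmented matrix, where each row [a, b, ..., c] encodes a*x + b*y + ... = c. A small sanity check of that encoding against numpy's linear solver (the 2x2 system below is an illustrative example, not taken from the row):

# Cross-check the augmented-matrix convention with numpy.linalg.solve.
import numpy as np

# Each row is [a, b, c] for the equation a*x + b*y = c.
augmented = [[2.0, 1.0, 5.0], [1.0, 3.0, 10.0]]

coeffs = np.array([row[:-1] for row in augmented])
consts = np.array([row[-1] for row in augmented])
x, y = np.linalg.solve(coeffs, consts)
print(round(x, 5), round(y, 5))  # 1.0 3.0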
'''simple docstring''' from __future__ import annotations def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): """simple docstring""" if start is None: _snake_case : Optional[Any] = 0 if end is None: _snake_case : Any = len(lowerCAmelCase_ ) - 1 if start >= end: return _snake_case : Optional[Any] = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case : int = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
47
'''simple docstring''' from collections.abc import Generator def _a ( ): """simple docstring""" _snake_case , _snake_case : Union[str, Any] = 0, 1 while True: _snake_case , _snake_case : List[str] = b, a + b yield b def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" _snake_case : List[str] = 1 _snake_case : Dict = fibonacci_generator() while len(str(next(lowerCAmelCase_ ) ) ) < n: answer += 1 return answer + 1 if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
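The generator row above yields Fibonacci numbers lazily, which is what lets the digit-counting loop stop as soon as the target length is reached. A short usage sketch that takes the first terms with itertools.islice instead of materialising the sequence:

from itertools import islice

def fibonacci_generator():
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b

# The generator yields b, so the sequence starts at the second 1.
print(list(islice(fibonacci_generator(), 8)))
# [1, 2, 3, 5, 8, 13, 21, 34]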
'''simple docstring''' from __future__ import annotations import math def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(lowerCAmelCase_ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , ) return min( minimax(depth + 1 , node_index * 2 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , ) def _a ( ): """simple docstring""" _snake_case : Optional[int] = [90, 23, 6, 33, 21, 65, 123, 34_423] _snake_case : int = math.log(len(lowerCAmelCase_ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
47
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor UpperCAmelCase : str = logging.getLogger(__name__) UpperCAmelCase : Dict = 5_0 # max width of layer names UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' ) group.add_argument( '''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _a ( lowerCAmelCase_ ): """simple docstring""" if args.calibrator == "max": _snake_case : Optional[int] = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) _snake_case : Tuple = '''histogram''' elif args.calibrator == "mse": _snake_case : int = '''histogram''' else: raise ValueError(f'''Invalid calibrator {args.calibrator}''' ) _snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ ) _snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(f'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ ) if args.quant_disable: set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ ) if args.quant_disable_keyword: 
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ ) if args.recalibrate_weights: recalibrate_weights(lowerCAmelCase_ ) if args.fuse_qkv: fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ ) if args.clip_gelu: clip_gelu(lowerCAmelCase_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'''{name:80}: {module}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): for mod in [qq, qk, qv]: if not hasattr(lowerCAmelCase_ , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return _snake_case : Tuple = qq._amax.detach().item() _snake_case : Tuple = qk._amax.detach().item() _snake_case : List[Any] = qv._amax.detach().item() _snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) qq._amax.fill_(lowerCAmelCase_ ) qk._amax.fill_(lowerCAmelCase_ ) qv._amax.fill_(lowerCAmelCase_ ) logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): _snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ ) _snake_case : List[str] = mod._input_quantizer._amax.data.detach().item() logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: _snake_case : Dict = mod.weight.shape[0] _snake_case : Optional[int] = mod._weight_quantizer._amax.detach() _snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _a ( lowerCAmelCase_ ): """simple 
docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set _snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach() logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _snake_case : Tuple = amax def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ): """simple docstring""" if ignore is None: _snake_case : Dict = [] elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[int] = [ignore] _snake_case : str = 0 for name, mod in model.named_modules(): if not hasattr(lowerCAmelCase_ , '''weight''' ): continue _snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) for name, mod in model.named_modules(): _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ ) _snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ ) if not hasattr(lowerCAmelCase_ , '''weight''' ): continue if type(lowerCAmelCase_ ) in ignore: continue if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]: continue _snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}''' _snake_case : Any = f'''Wgt:{weight_q.extra_repr()}''' _snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}''' if len(lowerCAmelCase_ ) <= line_width: logger.info(lowerCAmelCase_ ) else: logger.info(f'''{name:{name_width}} {act_str}''' ) logger.info(f'''{" ":{name_width}} {wgt_str}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = 0 for name, mod in model.named_modules(): if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ): print(f'''{name:80} {mod}''' ) count += 1 print(f'''{count} TensorQuantizers found in model''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): 
for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Any = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ )
47
1
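The minimax row above alternates max and min levels down to the leaf scores. A compact restatement with the alternation written as `not is_max`, traced on the same eight leaf scores the row's main() uses; it reproduces the expected optimal value of 65:

import math

def minimax(depth, node_index, is_max, scores, height):
    if depth == height:
        return scores[node_index]
    # Evaluate both children one level deeper, flipping the player.
    children = (
        minimax(depth + 1, node_index * 2, not is_max, scores, height),
        minimax(depth + 1, node_index * 2 + 1, not is_max, scores, height),
    )
    return max(children) if is_max else min(children)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = math.log(len(scores), 2)  # 3 levels for 8 leaves
print(minimax(0, 0, True, scores, height))  # 65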
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : List[Any] = { 'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'], 'processing_mgp_str': ['MgpstrProcessor'], 'tokenization_mgp_str': ['MgpstrTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST', 'MgpstrModel', 'MgpstrPreTrainedModel', 'MgpstrForSceneTextRecognition', ] if TYPE_CHECKING: from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig from .processing_mgp_str import MgpstrProcessor from .tokenization_mgp_str import MgpstrTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mgp_str import ( MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST, MgpstrForSceneTextRecognition, MgpstrModel, MgpstrPreTrainedModel, ) else: import sys UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring''' from __future__ import annotations def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): """simple docstring""" if start is None: _snake_case : Optional[Any] = 0 if end is None: _snake_case : Any = len(lowerCAmelCase_ ) - 1 if start >= end: return _snake_case : Optional[Any] = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case : int = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
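The __init__ row above defers all submodule imports through _LazyModule. A minimal stdlib sketch of that lazy-import pattern (LazyModule here is a simplified stand-in, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called when normal lookup fails, i.e. on first access.
        mod_name = self._attr_to_module.get(attr)
        if mod_name is None:
            raise AttributeError(attr)
        module = importlib.import_module(f"{self.__name__}.{mod_name}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache for subsequent lookups
        return value

# Usage (hypothetical package layout):
# pkg = LazyModule("mypkg", {"configuration": ["MyConfig"]})
# pkg.MyConfig  # imports mypkg.configuration only at this point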
'''simple docstring''' import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) UpperCAmelCase : List[str] = '\\n Text data.\n Second line of data.' UpperCAmelCase : Dict = 'file' @pytest.fixture(scope='''session''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''') _snake_case : Dict = bytes(lowerCAmelCase_ , '''utf-8''' ) with zstd.open(lowerCAmelCase_ , '''wb''' ) as f: f.write(lowerCAmelCase_ ) return path @pytest.fixture def _a ( lowerCAmelCase_ ): """simple docstring""" with open(os.path.join(tmpfs.local_root_dir , lowerCAmelCase_ ) , '''w''' ) as f: f.write(lowerCAmelCase_ ) return FILE_PATH @pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path} _snake_case : Optional[Any] = input_paths[compression_format] _snake_case : str = tmp_path / '''cache''' _snake_case : int = DownloadConfig(cache_dir=lowerCAmelCase_ , extract_compressed_file=lowerCAmelCase_ ) _snake_case : Optional[Any] = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ ) with open(lowerCAmelCase_ ) as f: _snake_case : List[str] = f.read() with open(lowerCAmelCase_ ) as f: _snake_case : str = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('''default_extracted''' , [True, False] ) @pytest.mark.parametrize('''default_cache_dir''' , [True, False] ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Union[str, Any] = '''custom_cache''' _snake_case : int = '''custom_extracted_dir''' _snake_case : Dict = tmp_path / '''custom_extracted_path''' if default_extracted: _snake_case : Dict = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''') else: monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , lowerCAmelCase_ ) monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(lowerCAmelCase_ ) ) _snake_case : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) _snake_case : Optional[int] = xz_file _snake_case : Union[str, Any] = ( DownloadConfig(extract_compressed_file=lowerCAmelCase_ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowerCAmelCase_ ) ) _snake_case : str = cached_path(lowerCAmelCase_ , download_config=lowerCAmelCase_ ) assert Path(lowerCAmelCase_ ).parent.parts[-2:] == expected def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = str(Path(lowerCAmelCase_ ).resolve() ) assert cached_path(lowerCAmelCase_ ) == text_file # relative path _snake_case : Optional[int] = str(Path(lowerCAmelCase_ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(lowerCAmelCase_ ) == text_file def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = str(tmp_path.resolve() / '''__missing_file__.txt''' ) with pytest.raises(lowerCAmelCase_ ): cached_path(lowerCAmelCase_ ) # relative path 
_snake_case : Optional[Any] = '''./__missing_file__.txt''' with pytest.raises(lowerCAmelCase_ ): cached_path(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = get_from_cache(f'''tmp://{tmpfs_file}''' ) with open(lowerCAmelCase_ ) as f: _snake_case : Dict = f.read() assert output_file_content == FILE_CONTENT @patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ ) def _a ( ): """simple docstring""" with pytest.raises(lowerCAmelCase_ ): cached_path('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Union[str, Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(lowerCAmelCase_ ): http_get('''https://huggingface.co''' , temp_file=lowerCAmelCase_ ) with pytest.raises(lowerCAmelCase_ ): http_head('''https://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(lowerCAmelCase_ ): ftp_get('''ftp://huggingface.co''' , temp_file=lowerCAmelCase_ ) with pytest.raises(lowerCAmelCase_ ): ftp_head('''ftp://huggingface.co''' ) @patch('''datasets.config.HF_DATASETS_OFFLINE''' , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = tmp_path_factory.mktemp('''data''' ) / '''file.html''' with pytest.raises(lowerCAmelCase_ ): fsspec_get('''s3://huggingface.co''' , temp_file=lowerCAmelCase_ ) with pytest.raises(lowerCAmelCase_ ): fsspec_head('''s3://huggingface.co''' )
47
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id ) _snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits _snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean() _snake_case : Tuple = -(labels.shape[-1] * loss.item()) _snake_case : Union[str, Any] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
47
1
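The Flax MT5 test above builds decoder inputs with shift_tokens_right before scoring. A small numpy sketch of what that helper does, assuming the usual seq2seq convention of prepending the decoder start token and replacing ignored label positions (-100) with the pad id:

import numpy as np

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    # Decoder inputs are the labels shifted one position to the right.
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # Ignored label positions must become real tokens on the input side.
    return np.where(shifted == -100, pad_token_id, shifted)

labels = np.array([[5, 6, 7, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=0))
# [[0 5 6 7]]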
'''simple docstring''' import math from datetime import datetime, timedelta def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = year % 19 _snake_case : str = year % 4 _snake_case : Any = year % 7 _snake_case : Union[str, Any] = math.floor(year / 100 ) _snake_case : Tuple = math.floor((13 + 8 * leap_day_inhibits) / 25 ) _snake_case : Optional[int] = leap_day_inhibits / 4 _snake_case : Optional[int] = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 _snake_case : Optional[int] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 _snake_case : Optional[Any] = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon _snake_case : Union[str, Any] = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(lowerCAmelCase_ , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(lowerCAmelCase_ , 4 , 18 ) else: return datetime(lowerCAmelCase_ , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3): UpperCAmelCase : Optional[Any] = 'will be' if year > datetime.now().year else 'was' print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
47
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : Any = torch.nn.Linear(10 , 10 ) _snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) _snake_case : List[str] = Accelerator() _snake_case : Optional[Any] = accelerator.prepare(lowercase__ ) try: pickle.loads(pickle.dumps(lowercase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
47
1
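The Gauss Easter function above reduces the date to a handful of modular quantities. A worked trace for the year 2023, carrying each intermediate value explicitly; it lands on 2023-04-09, the actual Easter Sunday of that year:

import math
from datetime import datetime, timedelta

year = 2023
metonic_cycle = year % 19            # 9
julian_leap_year = year % 4          # 3
non_leap_year = year % 7             # 0
leap_day_inhibits = math.floor(year / 100)                              # 20
lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)  # 6
leap_day_reinstall_number = leap_day_inhibits / 4                       # 5.0

secular_moon_shift = (
    15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30                                                                  # 24.0
century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7  # 5.0

days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30            # 15.0
days_from_phm_to_sunday = (
    2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
) % 7                                                                   # 3.0

# Neither special case (29 or 28 days with remainder 6) applies here.
easter = datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))
print(easter.date())  # 2023-04-09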
'''simple docstring''' UpperCAmelCase : Optional[Any] = 'Tobias Carryer' from time import time class lowerCamelCase : def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__=int(time() ) ) -> Any: # noqa: B008 """simple docstring""" _snake_case : Dict = multiplier _snake_case : str = increment _snake_case : Dict = modulo _snake_case : str = seed def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : str = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. UpperCAmelCase : Any = LinearCongruentialGenerator(1_6_6_4_5_2_5, 1_0_1_3_9_0_4_2_2_3, 2 << 3_1) while True: print(lcg.next_number())
47
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
1
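The vector row above decides collinearity by testing whether the cross product of AB and AC vanishes. A compact restatement with the rounding tolerance kept as a parameter, mirroring the accuracy argument above:

def cross(u, v):
    # Standard 3D cross product.
    return (
        u[1] * v[2] - u[2] * v[1],
        u[2] * v[0] - u[0] * v[2],
        u[0] * v[1] - u[1] * v[0],
    )

def are_collinear(a, b, c, accuracy=10):
    # Three points are collinear exactly when AB x AC is the zero vector.
    ab = tuple(b[i] - a[i] for i in range(3))
    ac = tuple(c[i] - a[i] for i in range(3))
    return all(round(x, accuracy) == 0 for x in cross(ab, ac))

print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
print(are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # False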
'''simple docstring''' from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCamelCase (a__ ): _lowercase : List[Any] = """""" _lowercase : Tuple = """hf-legacy""" # "hf://"" is reserved for hffs def __init__( self , lowercase__ = None , lowercase__ = None , **lowercase__ , ) -> Optional[Any]: """simple docstring""" super().__init__(self , **lowercase__ ) _snake_case : List[Any] = repo_info _snake_case : int = token _snake_case : Optional[Any] = None def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" if self.dir_cache is None: _snake_case : List[str] = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _snake_case : Tuple = { '''name''': hf_file.rfilename, '''size''': None, '''type''': '''file''', } self.dir_cache.update( { str(lowercase__ ): {'''name''': str(lowercase__ ), '''size''': None, '''type''': '''directory'''} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = "rb" , **lowercase__ , ) -> Union[str, Any]: """simple docstring""" if not isinstance(self.repo_info , lowercase__ ): raise NotImplementedError(F'''Open is only implemented for dataset repositories, but got {self.repo_info}''' ) _snake_case : Optional[Any] = hf_hub_url(self.repo_info.id , lowercase__ , revision=self.repo_info.sha ) return fsspec.open( lowercase__ , mode=lowercase__ , headers=get_authentication_headers_for_url(lowercase__ , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open() def UpperCAmelCase_ ( self , lowercase__ , **lowercase__ ) -> Optional[int]: """simple docstring""" self._get_dirs() _snake_case : List[Any] = self._strip_protocol(lowercase__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__=False , **lowercase__ ) -> List[str]: """simple docstring""" self._get_dirs() _snake_case : Optional[int] = PurePosixPath(path.strip('''/''' ) ) _snake_case : Any = {} for p, f in self.dir_cache.items(): _snake_case : List[Any] = PurePosixPath(p.strip('''/''' ) ) _snake_case : Tuple = p.parent if root == path: _snake_case : int = f _snake_case : Optional[Any] = list(paths.values() ) if detail: return out else: return sorted(f['''name'''] for f in out )
47
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowerCAmelCase_ ): if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''config.json''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) else: os.makedirs(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Optional[Any] = 2 if unlogit: _snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ ) _snake_case : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) ) for row in range(len(lowerCAmelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ): """simple docstring""" _snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads _snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) if head_mask is None: _snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCAmelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : Dict = None _snake_case : Dict = 0.0 _snake_case : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : List[Any] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += 
head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : Any = 2 _snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(lowerCAmelCase_ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(lowerCAmelCase_ ) logger.info('''Head ranked by importance scores''' ) _snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case : List[Any] = torch.arange( head_importance.numel() , device=args.device ) _snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ ) print_ad_tensor(lowerCAmelCase_ ) return attn_entropy, head_importance, total_loss def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ ) _snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold ) _snake_case : int = torch.ones_like(lowerCAmelCase_ ) _snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case : int = original_score while current_score >= original_score * args.masking_threshold: _snake_case : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case : Dict = float('''Inf''' ) _snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCAmelCase_ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads _snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) _snake_case : Tuple = new_head_mask.view(-1 ) _snake_case : List[str] = 0.0 _snake_case : str = new_head_mask.view_as(lowerCAmelCase_ ) _snake_case : Dict = new_head_mask.clone().detach() print_ad_tensor(lowerCAmelCase_ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case : Any = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : int = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(lowerCAmelCase_ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = datetime.now() _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 
compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : Tuple = 1 / loss _snake_case : Dict = datetime.now() - before_time _snake_case : List[Any] = sum(p.numel() for p in model.parameters() ) _snake_case : int = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) ) } for k, v in heads_to_prune.items(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Union[str, Any] = [ v, ] assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCAmelCase_ ) _snake_case : List[str] = sum(p.numel() for p in model.parameters() ) _snake_case : int = datetime.now() _snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , ) _snake_case : Optional[int] = 1 / loss _snake_case : Dict = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(lowerCAmelCase_ , args.output_dir ) def _a ( ): """simple docstring""" _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 ) parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) _snake_case : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) _snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case : List[str] = torch.device('''cuda''' , args.local_rank ) _snake_case : int = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _snake_case : Optional[int] = nn.parallel.DistributedDataParallel( lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ ) elif args.n_gpu > 1: _snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ ) torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ ) # Prepare dataset _snake_case : Dict = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),) _snake_case : Tuple = TensorDataset(*lowerCAmelCase_ ) _snake_case : List[str] = RandomSampler(lowerCAmelCase_ ) _snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
47
1
'''simple docstring''' import torch from diffusers import DDPMParallelScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase (a__ ): _lowercase : Union[str, Any] = (DDPMParallelScheduler,) def UpperCAmelCase_ ( self , **lowercase__ ) -> Optional[int]: """simple docstring""" _snake_case : int = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''variance_type''': '''fixed_small''', '''clip_sample''': True, } config.update(**lowercase__ ) return config def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for timesteps in [1, 5, 100, 1_000]: self.check_over_configs(num_train_timesteps=lowercase__ ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=lowercase__ , beta_end=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowercase__ ) def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" self.check_over_configs(thresholding=lowercase__ ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=lowercase__ , prediction_type=lowercase__ , sample_max_value=lowercase__ , ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=lowercase__ ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for t in [0, 500, 999]: self.check_over_forward(time_step=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : Union[str, Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config() _snake_case : Any = scheduler_class(**lowercase__ ) assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5 def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : Dict = self.scheduler_classes[0] _snake_case : Tuple = self.get_scheduler_config() _snake_case : Optional[Any] = scheduler_class(**lowercase__ ) _snake_case : Optional[int] = len(lowercase__ ) _snake_case : Dict = self.dummy_model() _snake_case : Dict = self.dummy_sample_deter _snake_case : List[Any] = self.dummy_sample_deter + 0.1 _snake_case : Dict = self.dummy_sample_deter - 0.1 _snake_case : str = samplea.shape[0] _snake_case : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 ) _snake_case : List[str] = torch.arange(lowercase__ )[0:3, None].repeat(1 , lowercase__ ) _snake_case : int = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) _snake_case : str = scheduler.batch_step_no_noise(lowercase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) ) _snake_case : int = torch.sum(torch.abs(lowercase__ ) ) _snake_case : Union[str, Any] = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_sum.item() - 
1_153.1_833 ) < 1E-2 assert abs(result_mean.item() - 0.5_005 ) < 1E-3 def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : Optional[Any] = self.scheduler_classes[0] _snake_case : List[str] = self.get_scheduler_config() _snake_case : Optional[int] = scheduler_class(**lowercase__ ) _snake_case : List[str] = len(lowercase__ ) _snake_case : Optional[Any] = self.dummy_model() _snake_case : Dict = self.dummy_sample_deter _snake_case : Optional[int] = torch.manual_seed(0 ) for t in reversed(range(lowercase__ ) ): # 1. predict noise residual _snake_case : Union[str, Any] = model(lowercase__ , lowercase__ ) # 2. predict previous mean of sample x_t-1 _snake_case : int = scheduler.step(lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ ).prev_sample _snake_case : List[str] = pred_prev_sample _snake_case : int = torch.sum(torch.abs(lowercase__ ) ) _snake_case : Tuple = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_sum.item() - 258.9_606 ) < 1E-2 assert abs(result_mean.item() - 0.3_372 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Tuple = self.scheduler_classes[0] _snake_case : str = self.get_scheduler_config(prediction_type='''v_prediction''' ) _snake_case : Dict = scheduler_class(**lowercase__ ) _snake_case : Dict = len(lowercase__ ) _snake_case : Union[str, Any] = self.dummy_model() _snake_case : Optional[int] = self.dummy_sample_deter _snake_case : List[Any] = torch.manual_seed(0 ) for t in reversed(range(lowercase__ ) ): # 1. predict noise residual _snake_case : List[str] = model(lowercase__ , lowercase__ ) # 2. predict previous mean of sample x_t-1 _snake_case : Union[str, Any] = scheduler.step(lowercase__ , lowercase__ , lowercase__ , generator=lowercase__ ).prev_sample _snake_case : Optional[Any] = pred_prev_sample _snake_case : List[Any] = torch.sum(torch.abs(lowercase__ ) ) _snake_case : str = torch.mean(torch.abs(lowercase__ ) ) assert abs(result_sum.item() - 202.0_296 ) < 1E-2 assert abs(result_mean.item() - 0.2_631 ) < 1E-3 def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : Dict = self.scheduler_classes[0] _snake_case : Optional[Any] = self.get_scheduler_config() _snake_case : Union[str, Any] = scheduler_class(**lowercase__ ) _snake_case : str = [100, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=lowercase__ ) _snake_case : List[str] = scheduler.timesteps for i, timestep in enumerate(lowercase__ ): if i == len(lowercase__ ) - 1: _snake_case : int = -1 else: _snake_case : Optional[Any] = timesteps[i + 1] _snake_case : str = scheduler.previous_timestep(lowercase__ ) _snake_case : Tuple = prev_t.item() self.assertEqual(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : str = self.scheduler_classes[0] _snake_case : int = self.get_scheduler_config() _snake_case : int = scheduler_class(**lowercase__ ) _snake_case : Dict = [100, 87, 50, 51, 0] with self.assertRaises(lowercase__ , msg='''`custom_timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : Optional[Any] = self.scheduler_classes[0] _snake_case : Any = self.get_scheduler_config() _snake_case : Any = scheduler_class(**lowercase__ ) _snake_case : Tuple = [100, 87, 50, 1, 0] _snake_case : List[Any] = len(lowercase__ ) with self.assertRaises(lowercase__ , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ): 
scheduler.set_timesteps(num_inference_steps=lowercase__ , timesteps=lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : int = self.scheduler_classes[0] _snake_case : int = self.get_scheduler_config() _snake_case : Union[str, Any] = scheduler_class(**lowercase__ ) _snake_case : Union[str, Any] = [scheduler.config.num_train_timesteps] with self.assertRaises( lowercase__ , msg=F'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ): scheduler.set_timesteps(timesteps=lowercase__ )
47
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return 0 elif n == 2: return 1 else: _snake_case : Union[str, Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : int = 2 while digits < n: index += 1 _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) ) return index def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" return fibonacci_digits_index(lowerCAmelCase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
'''simple docstring''' import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json', 'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json', } class lowerCamelCase (a__ ): _lowercase : Optional[int] = """xlnet""" _lowercase : Optional[Any] = ["""mems"""] _lowercase : Any = { """n_token""": """vocab_size""", # Backward compatibility """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase__=32_000 , lowercase__=1_024 , lowercase__=24 , lowercase__=16 , lowercase__=4_096 , lowercase__="gelu" , lowercase__=True , lowercase__="bi" , lowercase__=0.02 , lowercase__=1E-1_2 , lowercase__=0.1 , lowercase__=512 , lowercase__=None , lowercase__=True , lowercase__=False , lowercase__=False , lowercase__=-1 , lowercase__=False , lowercase__="last" , lowercase__=True , lowercase__="tanh" , lowercase__=0.1 , lowercase__=5 , lowercase__=5 , lowercase__=5 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" _snake_case : Union[str, Any] = vocab_size _snake_case : Dict = d_model _snake_case : int = n_layer _snake_case : Union[str, Any] = n_head if d_model % n_head != 0: raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' ) if "d_head" in kwargs: if kwargs["d_head"] != d_model // n_head: raise ValueError( F'''`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})''' ) _snake_case : str = d_model // n_head _snake_case : Any = ff_activation _snake_case : int = d_inner _snake_case : List[Any] = untie_r _snake_case : Optional[int] = attn_type _snake_case : Optional[Any] = initializer_range _snake_case : str = layer_norm_eps _snake_case : List[Any] = dropout _snake_case : Any = mem_len _snake_case : List[Any] = reuse_len _snake_case : Union[str, Any] = bi_data _snake_case : List[str] = clamp_len _snake_case : Optional[int] = same_length _snake_case : List[Any] = summary_type _snake_case : str = summary_use_proj _snake_case : Optional[Any] = summary_activation _snake_case : Optional[Any] = summary_last_dropout _snake_case : List[Any] = start_n_top _snake_case : List[str] = end_n_top _snake_case : Tuple = bos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = eos_token_id if "use_cache" in kwargs: warnings.warn( '''The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`''' ''' instead.''' , lowercase__ , ) _snake_case : Optional[int] = kwargs['''use_cache'''] _snake_case : int = use_mems_eval _snake_case : Union[str, Any] = use_mems_train super().__init__(pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ , **lowercase__ ) @property def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" raise NotImplementedError( F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
47
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCAmelCase : Any = TypeVar('T') UpperCAmelCase : str = TypeVar('U') class lowerCamelCase (Generic[T, U] ): def __init__( self , lowercase__ , lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : str = key _snake_case : Optional[int] = val _snake_case : DoubleLinkedListNode[T, U] | None = None _snake_case : DoubleLinkedListNode[T, U] | None = None def __repr__( self ) -> str: """simple docstring""" return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class lowerCamelCase (Generic[T, U] ): def __init__( self ) -> None: """simple docstring""" _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case , _snake_case : Union[str, Any] = self.rear, self.head def __repr__( self ) -> str: """simple docstring""" _snake_case : List[Any] = ['''DoubleLinkedList'''] _snake_case : str = self.head while node.next is not None: rep.append(str(lowercase__ ) ) _snake_case : List[str] = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Tuple = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _snake_case : Union[str, Any] = node _snake_case : Optional[Any] = previous _snake_case : int = node _snake_case : Union[str, Any] = self.rear def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None: """simple docstring""" if node.prev is None or node.next is None: return None _snake_case : Optional[int] = node.next _snake_case : Any = node.prev _snake_case : List[str] = None _snake_case : Optional[int] = None return node class lowerCamelCase (Generic[T, U] ): _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList() _snake_case : Union[str, Any] = capacity _snake_case : int = 0 _snake_case : Dict = 0 _snake_case : Union[str, Any] = 0 _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ) -> str: """simple docstring""" return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self , lowercase__ ) -> bool: """simple docstring""" return key in self.cache def UpperCAmelCase_ ( self , lowercase__ ) -> U | None: """simple docstring""" if key in self.cache: self.hits += 1 _snake_case : DoubleLinkedListNode[T, U] = self.cache[key] _snake_case : Tuple = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowercase__ ) return node.val self.miss += 1 return None def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _snake_case : Dict = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowercase__ ) is not None ) # node guaranteed to be in 
list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _snake_case : Optional[Any] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _snake_case : Optional[Any] = value self.list.add(lowercase__ ) @classmethod def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: """simple docstring""" def cache_decorator_inner(lowercase__ ) -> Callable[..., U]: def cache_decorator_wrapper(*lowercase__ ) -> U: if func not in cls.decorator_function_to_instance_map: _snake_case : Optional[Any] = LRUCache(lowercase__ ) _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _snake_case : Tuple = func(*lowercase__ ) cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
47
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Union[str, Any] = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Tuple: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Optional[Any] = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> int: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> List[Any]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[Any]: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Optional[int] = ["""torch""", """transformers""", """onnx"""] def __init__( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> int: """simple 
docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
47
'''simple docstring''' import os import numpy import onnx def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = a.name _snake_case : List[Any] = b.name _snake_case : Tuple = '''''' _snake_case : Tuple = '''''' _snake_case : Optional[Any] = a == b _snake_case : List[Any] = name_a _snake_case : str = name_b return res def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ ) _graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for n in graph_proto.node: _node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = list(model.graph.initializer ) _snake_case : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i _snake_case : List[Any] = inits[i].name _snake_case : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Tuple = os.path.dirname(lowerCAmelCase_ ) _snake_case : str = os.path.basename(lowerCAmelCase_ ) _snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) _snake_case : Union[str, Any] = list(model.graph.initializer ) _snake_case : Union[str, Any] = set() _snake_case : Any = {} _snake_case : str = [] _snake_case : Union[str, Any] = 0 for i in range(len(lowerCAmelCase_ ) ): if i in dup_set: continue for j in range(i + 1 , len(lowerCAmelCase_ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(lowerCAmelCase_ ) dup_set.add(lowerCAmelCase_ ) _snake_case : List[Any] = inits[j].data_type _snake_case : Dict = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('''unexpected data type: ''' , lowerCAmelCase_ ) total_reduced_size += mem_size _snake_case : Union[str, Any] = inits[i].name _snake_case : Any = inits[j].name if name_i in dup_map: dup_map[name_i].append(lowerCAmelCase_ ) else: _snake_case : Union[str, Any] = [name_j] ind_to_replace.append((j, i) ) print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' ) _snake_case : List[str] = sorted(lowerCAmelCase_ ) _remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : List[str] = '''optimized_''' + model_file_name _snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) onnx.save(lowerCAmelCase_ , lowerCAmelCase_ ) return new_model
47
1
'''simple docstring''' from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_torch_available(): import torch if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm UpperCAmelCase : Tuple = logging.get_logger(__name__) @dataclass class lowerCamelCase (a__ ): _lowercase : Any = [ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self , **lowercase__ ) -> List[Any]: """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: _snake_case : Optional[int] = deprecated_arg[3:] setattr(self , lowercase__ , not kwargs.pop(lowercase__ ) ) logger.warning( F'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) _snake_case : Optional[Any] = kwargs.pop('''torchscript''' , self.torchscript ) _snake_case : Dict = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics ) _snake_case : Tuple = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level ) super().__init__(**lowercase__ ) _lowercase : bool = field(default=a__ , metadata={"""help""": """Trace the models using torchscript"""} ) _lowercase : bool = field(default=a__ , metadata={"""help""": """Print Xla/PyTorch tpu metrics"""} ) _lowercase : str = field( default="""O1""" , metadata={ """help""": ( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. """ """See details at https://nvidia.github.io/apex/amp.html""" ) } , ) @cached_property def UpperCAmelCase_ ( self ) -> Tuple["torch.device", int]: """simple docstring""" requires_backends(self , ['''torch'''] ) logger.info('''PyTorch: setting up devices''' ) if not self.cuda: _snake_case : List[str] = torch.device('''cpu''' ) _snake_case : int = 0 elif is_torch_tpu_available(): _snake_case : Tuple = xm.xla_device() _snake_case : int = 0 else: _snake_case : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) _snake_case : Optional[Any] = torch.cuda.device_count() return device, n_gpu @property def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" return is_torch_tpu_available() and self.tpu @property def UpperCAmelCase_ ( self ) -> int: """simple docstring""" requires_backends(self , ['''torch'''] ) # TODO(PVP): currently only single GPU is supported return torch.cuda.current_device() @property def UpperCAmelCase_ ( self ) -> "torch.device": """simple docstring""" requires_backends(self , ['''torch'''] ) return self._setup_devices[0] @property def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" requires_backends(self , ['''torch'''] ) return self._setup_devices[1] @property def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.n_gpu > 0
47
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : int = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : List[Any] = '▁' UpperCAmelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model'} UpperCAmelCase : Optional[int] = { 'vocab_file': { 'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model', } } UpperCAmelCase : int = { 'facebook/xglm-564M': 2_0_4_8, } class lowerCamelCase (a__ ): _lowercase : List[Any] = VOCAB_FILES_NAMES _lowercase : Any = PRETRAINED_VOCAB_FILES_MAP _lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Optional[Any] = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__ = None , **lowercase__ , ) -> None: """simple docstring""" _snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer _snake_case : Union[str, Any] = 7 _snake_case : Union[str, Any] = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )] _snake_case : List[str] = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , ) _snake_case : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowercase__ ) ) _snake_case : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _snake_case : Any = 1 # Mimic fairseq token-to-id alignment for the first 4 token _snake_case : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} _snake_case : Dict = len(self.sp_model ) _snake_case : str = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(lowercase__ ) _snake_case : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> List[Any]: """simple docstring""" _snake_case : List[Any] = self.__dict__.copy() _snake_case : Any = None _snake_case : List[str] = self.sp_model.serialized_model_proto() return state def __setstate__( self , lowercase__ ) -> Tuple: """simple docstring""" _snake_case : Any = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _snake_case : Any = {} _snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.sep_token_id] + token_ids_a _snake_case : List[str] = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ ) if token_ids_a is None: return [1] + ([0] * len(lowercase__ )) return [1] + ([0] * len(lowercase__ )) + [1, 1] + ([0] * len(lowercase__ )) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> List[int]: """simple docstring""" _snake_case : List[str] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[Any] = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def UpperCAmelCase_ ( self , lowercase__ ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowercase__ , out_type=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[Any]: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _snake_case : List[str] = self.sp_model.PieceToId(lowercase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def UpperCAmelCase_ ( self , lowercase__ ) -> str: """simple docstring""" _snake_case : str = ''''''.join(lowercase__ ).replace(lowercase__ , ''' ''' ).strip() return out_string def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowercase__ ): logger.error(F'''Vocabulary path 
({save_directory}) should be a directory''' ) return _snake_case : Any = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase__ , '''wb''' ) as fi: _snake_case : int = self.sp_model.serialized_model_proto() fi.write(lowercase__ ) return (out_vocab_file,)
47
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase : Dict = logging.get_logger(__name__) class lowerCamelCase (a__ ): _lowercase : int = ["""pixel_values"""] def __init__( self , lowercase__ = True , lowercase__ = 32 , lowercase__=PILImageResampling.BILINEAR , lowercase__ = True , **lowercase__ , ) -> None: """simple docstring""" _snake_case : Any = do_resize _snake_case : List[str] = do_rescale _snake_case : Any = size_divisor _snake_case : Optional[Any] = resample super().__init__(**lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray: """simple docstring""" _snake_case , _snake_case : Dict = get_image_size(lowercase__ ) # Rounds the height and width down to the closest multiple of size_divisor _snake_case : Optional[int] = height // size_divisor * size_divisor _snake_case : Dict = width // size_divisor * size_divisor _snake_case : str = resize(lowercase__ , (new_h, new_w) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) return image def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray: """simple docstring""" return rescale(image=lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> BatchFeature: """simple docstring""" _snake_case : Any = do_resize if do_resize is not None else self.do_resize _snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale _snake_case : List[str] = size_divisor if size_divisor is not None else self.size_divisor _snake_case : int = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) _snake_case : Tuple = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. _snake_case : Tuple = [to_numpy_array(lowercase__ ) for img in images] if do_resize: _snake_case : Optional[int] = [self.resize(lowercase__ , size_divisor=lowercase__ , resample=lowercase__ ) for image in images] if do_rescale: _snake_case : Union[str, Any] = [self.rescale(lowercase__ , scale=1 / 255 ) for image in images] _snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] _snake_case : List[str] = {'''pixel_values''': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ = 10 ): """simple docstring""" if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or n < 0: raise ValueError('''Invalid input''' ) _snake_case : Tuple = 10**n _snake_case : Any = 28_433 * (pow(2 , 7_830_457 , lowerCAmelCase_ )) + 1 return str(number % modulus ) if __name__ == "__main__": from doctest import testmod testmod() print(F"""{solution(1_0) = }""")
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : _lowercase : Any = LEDConfig _lowercase : Any = {} _lowercase : Optional[Any] = """gelu""" def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any: """simple docstring""" _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : List[str] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_labels _snake_case : int = vocab_size _snake_case : str = hidden_size _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[int] = max_position_embeddings _snake_case : Any = eos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = bos_token_id _snake_case : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _snake_case : Any = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Dict = tf.concat( [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , ) _snake_case : Dict = global_attention_mask return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder() _snake_case : Union[str, Any] = inputs_dict['''input_ids'''] _snake_case : List[str] = input_ids[:1, :] _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :] _snake_case : Dict = 1 # first forward pass _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ ) _snake_case , _snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : int = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : str = tf.concat( [ 
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowercase : Dict = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : int = True _lowercase : List[Any] = False _lowercase : str = False _lowercase : Union[str, Any] = False def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFLEDModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) _snake_case : Optional[Any] = 2 _snake_case : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _snake_case : Dict = True _snake_case : str = self.model_tester.seq_length _snake_case : Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase__ ): _snake_case : Optional[int] = outputs.decoder_attentions self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase__ ): _snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = model_class(lowercase__ ) _snake_case : Optional[Any] = 
model(self._prepare_for_class(lowercase__ , lowercase__ ) ) _snake_case : List[Any] = len(lowercase__ ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) if self.is_encoder_decoder: _snake_case : Union[str, Any] = model_class(lowercase__ ) _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_decoder_attentions_output(lowercase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : str = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) # Check attention is always last and order is fine _snake_case : int = True _snake_case : List[str] = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) ) self.assertEqual(model.config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def _a ( lowerCAmelCase_ ): """simple docstring""" return tf.constant(lowerCAmelCase_ , dtype=tf.intaa ) UpperCAmelCase : Dict = 1E-4 @slow @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : int = model(**lowercase__ )[0] _snake_case : Dict = (1, 1_024, 768) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : List[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : Tuple = model(**lowercase__ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : Dict = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
1
'''simple docstring''' import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case , _snake_case : Tuple = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) _snake_case : List[Any] = '''A painting of a squirrel eating a burger''' _snake_case : List[Any] = jax.device_count() _snake_case : Union[str, Any] = num_samples * [prompt] _snake_case : Tuple = sd_pipe.prepare_inputs(lowercase__ ) _snake_case : int = replicate(lowercase__ ) _snake_case : List[Any] = shard(lowercase__ ) _snake_case : List[str] = jax.random.PRNGKey(0 ) _snake_case : List[Any] = jax.random.split(lowercase__ , jax.device_count() ) _snake_case : Optional[Any] = sd_pipe(lowercase__ , lowercase__ , lowercase__ , num_inference_steps=25 , jit=lowercase__ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _snake_case : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _snake_case : int = images[0, 253:256, 253:256, -1] _snake_case : List[str] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _snake_case : Dict = jnp.array([0.4_238, 0.4_414, 0.4_395, 0.4_453, 0.4_629, 0.4_590, 0.4_531, 0.45_508, 0.4_512] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[str] = '''stabilityai/stable-diffusion-2''' _snake_case , _snake_case : Any = FlaxDPMSolverMultistepScheduler.from_pretrained(lowercase__ , subfolder='''scheduler''' ) _snake_case , _snake_case : List[str] = FlaxStableDiffusionPipeline.from_pretrained( lowercase__ , scheduler=lowercase__ , revision='''bf16''' , dtype=jnp.bfloataa , ) _snake_case : Optional[Any] = scheduler_params _snake_case : Optional[int] = '''A painting of a squirrel eating a burger''' _snake_case : List[Any] = jax.device_count() _snake_case : Tuple = num_samples * [prompt] _snake_case : Optional[Any] = sd_pipe.prepare_inputs(lowercase__ ) _snake_case : Tuple = replicate(lowercase__ ) _snake_case : Any = shard(lowercase__ ) _snake_case : str = jax.random.PRNGKey(0 ) _snake_case : List[Any] = jax.random.split(lowercase__ , jax.device_count() ) _snake_case : str = sd_pipe(lowercase__ , lowercase__ , lowercase__ , num_inference_steps=25 , jit=lowercase__ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _snake_case : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _snake_case : List[Any] = images[0, 253:256, 253:256, -1] _snake_case : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) ) _snake_case : str = jnp.array([0.4_336, 0.42_969, 0.4_453, 0.4_199, 0.4_297, 0.4_531, 0.4_434, 0.4_434, 0.4_297] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
47
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Any = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } UpperCAmelCase : Optional[Any] = { 'gpt-neox-20b': 2_0_4_8, } class lowerCamelCase (a__ ): _lowercase : Optional[int] = VOCAB_FILES_NAMES _lowercase : str = PRETRAINED_VOCAB_FILES_MAP _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]: """simple docstring""" super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , ) _snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space: _snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) ) _snake_case : int = add_prefix_space _snake_case : Optional[Any] = pre_tok_class(**lowercase__ ) _snake_case : List[str] = add_prefix_space def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" _snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]: """simple docstring""" _snake_case : List[str] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] ) if len(lowercase__ ) > self.model_max_length: _snake_case : Dict = input_ids[-self.model_max_length :] return input_ids
47
1
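The Flax pipeline test in the row above relies on the standard pmap data-parallel recipe: `replicate` the parameters across devices, `shard` the batch along its leading axis, and split one PRNG key per device. A toy sketch of just that recipe, assuming `jax` and `flax` are installed (on CPU, `jax.device_count()` is typically 1, so the split is trivial but the shapes still check out):

```python
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

def apply(params, batch):          # toy stand-in for the diffusion pipeline
    return batch + params

p_apply = jax.pmap(apply)

params = jnp.ones(())              # unreplicated parameter pytree
batch = jnp.arange(jax.device_count() * 4, dtype=jnp.float32)

params = replicate(params)         # copy params to every device
batch = shard(batch)               # (num_devices, per_device_batch)

print(p_apply(params, batch).shape)
```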
'''simple docstring''' import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : Union[str, Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase (nn.Module ): def __init__( self , lowercase__ ) -> str: """simple docstring""" super().__init__() _snake_case : Dict = torchvision.models.resnetaaa(pretrained=lowercase__ ) _snake_case : Optional[Any] = list(model.children() )[:-2] _snake_case : Union[str, Any] = nn.Sequential(*lowercase__ ) _snake_case : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def UpperCAmelCase_ ( self , lowercase__ ) -> str: """simple docstring""" _snake_case : Tuple = self.pool(self.model(lowercase__ ) ) _snake_case : Union[str, Any] = torch.flatten(lowercase__ , start_dim=2 ) _snake_case : List[Any] = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase (a__ ): def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : List[Any] = [json.loads(lowercase__ ) for l in open(lowercase__ )] _snake_case : Optional[Any] = os.path.dirname(lowercase__ ) _snake_case : Dict = tokenizer _snake_case : Any = labels _snake_case : Tuple = len(lowercase__ ) _snake_case : Tuple = max_seq_length _snake_case : Optional[Any] = transforms def __len__( self ) -> Optional[int]: """simple docstring""" return len(self.data ) def __getitem__( self , lowercase__ ) -> Optional[int]: """simple docstring""" _snake_case : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]['''text'''] , add_special_tokens=lowercase__ ) ) _snake_case , _snake_case , _snake_case : Dict = sentence[0], sentence[1:-1], sentence[-1] _snake_case : int = sentence[: self.max_seq_length] _snake_case : Union[str, Any] = torch.zeros(self.n_classes ) _snake_case : Any = 1 _snake_case : List[str] = Image.open(os.path.join(self.data_dir , self.data[index]['''img'''] ) ).convert('''RGB''' ) _snake_case : Optional[Any] = self.transforms(lowercase__ ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[Any] = Counter() for row in self.data: label_freqs.update(row['''label'''] ) return label_freqs def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = [len(row['''sentence'''] ) for row in batch] _snake_case , _snake_case : List[str] = len(lowerCAmelCase_ ), max(lowerCAmelCase_ ) _snake_case : Dict = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.long ) _snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(lowerCAmelCase_ , lowerCAmelCase_ ) ): _snake_case : Dict = input_row['''sentence'''] _snake_case : str = 1 _snake_case : Optional[int] = torch.stack([row['''image'''] for row in batch] ) _snake_case : str = torch.stack([row['''label'''] for row in batch] ) _snake_case : Dict = torch.stack([row['''image_start_token'''] for row in batch] ) _snake_case : Union[str, Any] = torch.stack([row['''image_end_token'''] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def _a ( ): """simple docstring""" return [ "Crime", 
"Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def _a ( ): """simple docstring""" return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
47
'''simple docstring'''
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """simple docstring"""
    if num <= 0:
        raise ValueError('math domain error')
    # integrate x^(num-1) * e^(-x) from 0 to infinity
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    """simple docstring"""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
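The gamma snippet in the row above evaluates the integral Γ(z) = ∫₀^∞ x^(z-1) e^(-x) dx with `scipy.integrate.quad`. A quick sanity check of that formula via the identity Γ(n) = (n-1)! for positive integers (the inline lambda is illustrative):

```python
import math

from numpy import inf
from scipy.integrate import quad

def gamma(z: float) -> float:
    return quad(lambda x: x ** (z - 1) * math.exp(-x), 0, inf)[0]

# Γ(n) == (n-1)! for positive integers
for n in range(1, 7):
    assert math.isclose(gamma(n), math.factorial(n - 1), rel_tol=1e-6)
print(gamma(5))  # ~24.0
```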
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('1.0.0a'): raise Exception('requires fairseq >= 1.0.0a') logging.set_verbosity_info() UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : Dict = 'Hello world! cécé herlolip' def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = FairseqRobertaModel.from_pretrained(lowerCAmelCase_ ) roberta.eval() # disable dropout _snake_case : Optional[int] = roberta.model.encoder.sentence_encoder _snake_case : Optional[Any] = XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: _snake_case : Dict = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , lowerCAmelCase_ ) _snake_case : List[str] = XLMRobertaXLForSequenceClassification(lowerCAmelCase_ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCAmelCase_ ) model.eval() # Now let's copy all the weights. # Embeddings _snake_case : int = roberta_sent_encoder.embed_tokens.weight _snake_case : Union[str, Any] = roberta_sent_encoder.embed_positions.weight _snake_case : str = torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. 
_snake_case : List[Any] = roberta_sent_encoder.layer_norm.weight _snake_case : List[Any] = roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer _snake_case : BertLayer = model.roberta.encoder.layer[i] _snake_case : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i] _snake_case : RobertaAttention = layer.attention _snake_case : Optional[Any] = roberta_layer.self_attn_layer_norm.weight _snake_case : List[str] = roberta_layer.self_attn_layer_norm.bias # self attention _snake_case : BertSelfAttention = layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) _snake_case : Optional[Any] = roberta_layer.self_attn.q_proj.weight _snake_case : int = roberta_layer.self_attn.q_proj.bias _snake_case : Optional[int] = roberta_layer.self_attn.k_proj.weight _snake_case : Any = roberta_layer.self_attn.k_proj.bias _snake_case : List[Any] = roberta_layer.self_attn.v_proj.weight _snake_case : Any = roberta_layer.self_attn.v_proj.bias # self-attention output _snake_case : BertSelfOutput = layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape _snake_case : Optional[Any] = roberta_layer.self_attn.out_proj.weight _snake_case : Union[str, Any] = roberta_layer.self_attn.out_proj.bias # this one is final layer norm _snake_case : str = roberta_layer.final_layer_norm.weight _snake_case : Optional[Any] = roberta_layer.final_layer_norm.bias # intermediate _snake_case : BertIntermediate = layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape _snake_case : str = roberta_layer.fca.weight _snake_case : List[Any] = roberta_layer.fca.bias # output _snake_case : BertOutput = layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape _snake_case : Optional[int] = roberta_layer.fca.weight _snake_case : Any = roberta_layer.fca.bias # end of layer if classification_head: _snake_case : List[Any] = roberta.model.classification_heads['''mnli'''].dense.weight _snake_case : Tuple = roberta.model.classification_heads['''mnli'''].dense.bias _snake_case : int = roberta.model.classification_heads['''mnli'''].out_proj.weight _snake_case : Any = roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head _snake_case : Tuple = roberta.model.encoder.lm_head.dense.weight _snake_case : Union[str, Any] = roberta.model.encoder.lm_head.dense.bias _snake_case : int = roberta.model.encoder.lm_head.layer_norm.weight _snake_case : Dict = roberta.model.encoder.lm_head.layer_norm.bias _snake_case : int = roberta.model.encoder.lm_head.weight _snake_case : Union[str, Any] = roberta.model.encoder.lm_head.bias # Let's check that we get the same results. 
_snake_case : torch.Tensor = roberta.encode(lowerCAmelCase_ ).unsqueeze(0 ) # batch of size 1 _snake_case : Any = model(lowerCAmelCase_ )[0] if classification_head: _snake_case : Any = roberta.model.classification_heads['''mnli'''](roberta.extract_features(lowerCAmelCase_ ) ) else: _snake_case : Any = roberta.model(lowerCAmelCase_ )[0] print(our_output.shape , their_output.shape ) _snake_case : str = torch.max(torch.abs(our_output - their_output ) ).item() print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7 _snake_case : List[Any] = torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(lowerCAmelCase_ ).mkdir(parents=lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--classification_head', action='store_true', help='Whether to convert a final classification head.' ) UpperCAmelCase : Dict = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained( 
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Any = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : str = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) 
self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
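The conversion script in the row above ports weights tensor-by-tensor, guarding every copy with a shape assertion before comparing model outputs. A minimal sketch of that port-with-assertions pattern using plain dicts and NumPy (the key names are illustrative):

```python
import numpy as np

def copy_weights(src: dict, dst: dict, mapping: dict) -> None:
    # port tensors key by key, failing loudly on any shape mismatch
    for src_key, dst_key in mapping.items():
        tensor = src[src_key]
        assert dst[dst_key].shape == tensor.shape, (src_key, dst_key)
        dst[dst_key] = tensor.copy()

src = {"self_attn.q_proj.weight": np.zeros((4, 4))}
dst = {"attention.self.query.weight": np.ones((4, 4))}
copy_weights(src, dst, {"self_attn.q_proj.weight": "attention.self.query.weight"})
assert not dst["attention.self.query.weight"].any()
```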
'''simple docstring''' class lowerCamelCase : def __init__( self , lowercase__ ) -> None: """simple docstring""" _snake_case : List[str] = size _snake_case : Optional[int] = [0] * size _snake_case : Optional[int] = [0] * size @staticmethod def UpperCAmelCase_ ( lowercase__ ) -> int: """simple docstring""" return index | (index + 1) @staticmethod def UpperCAmelCase_ ( lowercase__ ) -> int: """simple docstring""" return (index & (index + 1)) - 1 def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" _snake_case : Optional[Any] = value while index < self.size: _snake_case : Optional[int] = self.get_prev(lowercase__ ) + 1 if current_left_border == index: _snake_case : Optional[int] = value else: _snake_case : Dict = max(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Tuple = self.get_next(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" right -= 1 # Because of right is exclusive _snake_case : Dict = 0 while left <= right: _snake_case : Tuple = self.get_prev(lowercase__ ) if left <= current_left: _snake_case : Union[str, Any] = max(lowercase__ , self.tree[right] ) _snake_case : Optional[int] = current_left else: _snake_case : str = max(lowercase__ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
47
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
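The class in the row above uses the Fenwick-style index tricks `index | (index + 1)` (next covering node) and `(index & (index + 1)) - 1` (skip the covered block), but its obfuscated assignments no longer write into `self.arr` or `self.tree`. A simpler, self-checking variant of the same bit tricks, restricted to non-decreasing point updates and prefix maxima (a common Fenwick-max idiom, not a reconstruction of the original class):

```python
class MaxFenwickTree:
    """Non-decreasing point updates and prefix-maximum queries, 0-indexed."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.tree = [0] * size

    def update(self, index: int, value: int) -> None:
        # push the new value into every node whose range covers `index`
        while index < self.size:
            self.tree[index] = max(self.tree[index], value)
            index = index | (index + 1)

    def query(self, right: int) -> int:
        # maximum over positions [0, right], inclusive
        result = 0
        while right >= 0:
            result = max(result, self.tree[right])
            right = (right & (right + 1)) - 1  # skip the block this node covers
        return result

t = MaxFenwickTree(10)
t.update(3, 7)
t.update(8, 5)
assert (t.query(2), t.query(3), t.query(9)) == (0, 7, 7)
```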
'''simple docstring'''
from __future__ import annotations


def make_matrix(row_size: int = 4) -> list[list[int]]:
    """simple docstring"""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(transpose(matrix))  # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_row(reverse_column(matrix))  # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    return reverse_column(transpose(matrix))  # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """simple docstring"""
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    """simple docstring"""
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , 
from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = 
self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
47
1
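The rotation module in the row above builds every rotation out of `transpose`, `reverse_row`, and `reverse_column`. The identities are easy to spot-check on a 2x2 input; a non-mutating sketch (unlike the module, these helpers return fresh lists):

```python
def transpose(m):
    return [list(row) for row in zip(*m)]

def rotate_90_ccw(m):
    return transpose(m)[::-1]          # transpose, then reverse rows

def rotate_180(m):
    return [row[::-1] for row in m[::-1]]

m = [[1, 2], [3, 4]]
assert rotate_90_ccw(m) == [[2, 4], [1, 3]]
assert rotate_90_ccw(rotate_90_ccw(m)) == rotate_180(m) == [[4, 3], [2, 1]]
```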
'''simple docstring'''


def solution(length: int = 50) -> int:
    """simple docstring"""
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
47
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment 
_snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
47
1
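The table-filling solution in the row above counts Project-Euler-114-style rows: grey unit squares plus red blocks of length at least 3, with blocks separated by at least one grey square. A memoized recursion cross-checks the recurrence against the values the problem statement fixes (F(7) = 17):

```python
from functools import lru_cache

@lru_cache(maxsize=None)
def count(length: int) -> int:
    if length < 0:
        return 1   # the previous red block ended flush with the row
    total = 1      # the all-grey filling
    for start in range(length):                     # first red block's offset
        for block in range(3, length - start + 1):  # its length (>= 3)
            # one grey separator follows the block unless it ends the row
            total += count(length - start - block - 1)
    return total

assert [count(n) for n in (3, 4, 5, 7)] == [2, 4, 7, 17]
```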
'''simple docstring''' from collections import deque class lowerCamelCase : def __init__( self , lowercase__ , lowercase__ , lowercase__ ) -> None: """simple docstring""" _snake_case : int = process_name # process name _snake_case : Any = arrival_time # arrival time of the process # completion time of finished process or last interrupted time _snake_case : Any = arrival_time _snake_case : str = burst_time # remaining burst time _snake_case : Dict = 0 # total time of the process wait in ready queue _snake_case : int = 0 # time from arrival time to completion time class lowerCamelCase : def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> None: """simple docstring""" _snake_case : Union[str, Any] = number_of_queues # time slice of queues that round robin algorithm applied _snake_case : Optional[int] = time_slices # unfinished process is in this ready_queue _snake_case : List[Any] = queue # current time _snake_case : Any = current_time # finished process is in this sequence queue _snake_case : deque[Process] = deque() def UpperCAmelCase_ ( self ) -> list[str]: """simple docstring""" _snake_case : Tuple = [] for i in range(len(self.finish_queue ) ): sequence.append(self.finish_queue[i].process_name ) return sequence def UpperCAmelCase_ ( self , lowercase__ ) -> list[int]: """simple docstring""" _snake_case : Union[str, Any] = [] for i in range(len(lowercase__ ) ): waiting_times.append(queue[i].waiting_time ) return waiting_times def UpperCAmelCase_ ( self , lowercase__ ) -> list[int]: """simple docstring""" _snake_case : List[str] = [] for i in range(len(lowercase__ ) ): turnaround_times.append(queue[i].turnaround_time ) return turnaround_times def UpperCAmelCase_ ( self , lowercase__ ) -> list[int]: """simple docstring""" _snake_case : Any = [] for i in range(len(lowercase__ ) ): completion_times.append(queue[i].stop_time ) return completion_times def UpperCAmelCase_ ( self , lowercase__ ) -> list[int]: """simple docstring""" return [q.burst_time for q in queue] def UpperCAmelCase_ ( self , lowercase__ ) -> int: """simple docstring""" process.waiting_time += self.current_time - process.stop_time return process.waiting_time def UpperCAmelCase_ ( self , lowercase__ ) -> deque[Process]: """simple docstring""" _snake_case : deque[Process] = deque() # sequence deque of finished process while len(lowercase__ ) != 0: _snake_case : List[str] = ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of current process self.update_waiting_time(lowercase__ ) # update current time self.current_time += cp.burst_time # finish the process and set the process's burst-time 0 _snake_case : Tuple = 0 # set the process's turnaround time because it is finished _snake_case : Union[str, Any] = self.current_time - cp.arrival_time # set the completion time _snake_case : Tuple = self.current_time # add the process to queue that has finished queue finished.append(lowercase__ ) self.finish_queue.extend(lowercase__ ) # add finished process to finish queue # FCFS will finish all remaining processes return finished def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> tuple[deque[Process], deque[Process]]: """simple docstring""" _snake_case : deque[Process] = deque() # sequence deque of terminated process # just for 1 cycle and unfinished processes will go back to queue for _ in range(len(lowercase__ ) ): _snake_case : Any = 
ready_queue.popleft() # current process # if process's arrival time is later than current time, update current time if self.current_time < cp.arrival_time: self.current_time += cp.arrival_time # update waiting time of unfinished processes self.update_waiting_time(lowercase__ ) # if the burst time of process is bigger than time-slice if cp.burst_time > time_slice: # use CPU for only time-slice self.current_time += time_slice # update remaining burst time cp.burst_time -= time_slice # update end point time _snake_case : Union[str, Any] = self.current_time # locate the process behind the queue because it is not finished ready_queue.append(lowercase__ ) else: # use CPU for remaining burst time self.current_time += cp.burst_time # set burst time 0 because the process is finished _snake_case : Optional[Any] = 0 # set the finish time _snake_case : Tuple = self.current_time # update the process' turnaround time because it is finished _snake_case : Union[str, Any] = self.current_time - cp.arrival_time # add the process to queue that has finished queue finished.append(lowercase__ ) self.finish_queue.extend(lowercase__ ) # add finished process to finish queue # return finished processes queue and remaining processes queue return finished, ready_queue def UpperCAmelCase_ ( self ) -> deque[Process]: """simple docstring""" for i in range(self.number_of_queues - 1 ): _snake_case , _snake_case : Union[str, Any] = self.round_robin( self.ready_queue , self.time_slices[i] ) # the last queue has first_come_first_served algorithm self.first_come_first_served(self.ready_queue ) return self.finish_queue if __name__ == "__main__": import doctest UpperCAmelCase : Any = Process('P1', 0, 5_3) UpperCAmelCase : int = Process('P2', 0, 1_7) UpperCAmelCase : List[Any] = Process('P3', 0, 6_8) UpperCAmelCase : str = Process('P4', 0, 2_4) UpperCAmelCase : int = 3 UpperCAmelCase : List[Any] = [1_7, 2_5] UpperCAmelCase : List[str] = deque([Pa, Pa, Pa, Pa]) if len(time_slices) != number_of_queues - 1: raise SystemExit(0) doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])}) UpperCAmelCase : Optional[Any] = Process('P1', 0, 5_3) UpperCAmelCase : int = Process('P2', 0, 1_7) UpperCAmelCase : Dict = Process('P3', 0, 6_8) UpperCAmelCase : Dict = Process('P4', 0, 2_4) UpperCAmelCase : Tuple = 3 UpperCAmelCase : List[Any] = [1_7, 2_5] UpperCAmelCase : Tuple = deque([Pa, Pa, Pa, Pa]) UpperCAmelCase : Any = MLFQ(number_of_queues, time_slices, queue, 0) UpperCAmelCase : str = mlfq.multi_level_feedback_queue() # print total waiting times of processes(P1, P2, P3, P4) print( F"""waiting time:\ \t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print completion times of processes(P1, P2, P3, P4) print( F"""completion time:\ \t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print total turnaround times of processes(P1, P2, P3, P4) print( F"""turnaround time:\ \t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}""" ) # print sequence of finished processes print( F"""sequence of finished processes:\ {mlfq.calculate_sequence_of_finish_queue()}""" )
47
'''simple docstring''' from random import randint from tempfile import TemporaryFile import numpy as np def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = 0 if start < end: _snake_case : List[Any] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Any = a[end] _snake_case : List[str] = a[pivot] _snake_case : Optional[int] = temp _snake_case , _snake_case : List[Any] = _in_place_partition(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) count += _in_place_quick_sort(lowerCAmelCase_ , lowerCAmelCase_ , p - 1 ) count += _in_place_quick_sort(lowerCAmelCase_ , p + 1 , lowerCAmelCase_ ) return count def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = 0 _snake_case : Optional[int] = randint(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = a[end] _snake_case : Optional[Any] = a[pivot] _snake_case : Union[str, Any] = temp _snake_case : Union[str, Any] = start - 1 for index in range(lowerCAmelCase_ , lowerCAmelCase_ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value _snake_case : Optional[int] = new_pivot_index + 1 _snake_case : Optional[Any] = a[new_pivot_index] _snake_case : Tuple = a[index] _snake_case : str = temp _snake_case : Any = a[new_pivot_index + 1] _snake_case : str = a[end] _snake_case : Optional[int] = temp return new_pivot_index + 1, count UpperCAmelCase : Dict = TemporaryFile() UpperCAmelCase : Dict = 1_0_0 # 1000 elements are to be sorted UpperCAmelCase, UpperCAmelCase : str = 0, 1 # mean and standard deviation UpperCAmelCase : Optional[Any] = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array UpperCAmelCase : int = np.load(outfile) UpperCAmelCase : Optional[int] = len(M) - 1 UpperCAmelCase : str = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
47
1
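The MLFQ class in the row above demotes unfinished work through successive round-robin queues. Stripped to one queue, the round-robin step it repeats looks like this (a minimal sketch; the process names and time slice are arbitrary):

```python
from collections import deque

def round_robin(burst: dict, time_slice: int) -> list:
    """Run each process at most `time_slice` ticks per turn; return (name, finish_time)."""
    queue = deque(burst.items())
    clock, finished = 0, []
    while queue:
        name, remaining = queue.popleft()
        ran = min(remaining, time_slice)
        clock += ran
        if remaining > ran:
            queue.append((name, remaining - ran))  # not done: back of the queue
        else:
            finished.append((name, clock))
    return finished

print(round_robin({"P1": 5, "P2": 3, "P3": 8}, time_slice=4))
# [('P2', 7), ('P1', 12), ('P3', 16)]
```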
'''simple docstring'''
import socket


def main():
    """simple docstring"""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312

    sock.connect((host, port))
    sock.send(b'Hello server!')

    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)

    print('Successfully received the file')
    sock.close()
    print('Connection closed')


if __name__ == "__main__":
    main()
47
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
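The client in the row above only works against a peer that accepts the connection, reads the greeting, streams a file, and closes. The original file does not include that peer; a hypothetical minimal server (the filename and port are assumptions chosen to match the client):

```python
import socket

def serve_file(path: str = "payload.bin", port: int = 12_312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, addr = server.accept()
    print("Connected to", addr)
    conn.recv(1_024)                        # consume the client's greeting
    with open(path, "rb") as in_file:
        while chunk := in_file.read(1_024):
            conn.send(chunk)
    conn.close()                            # EOF tells the client to stop reading
    server.close()
```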
'''simple docstring''' import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : str = ['model.decoder.embed_positions.weights'] def _a ( lowerCAmelCase_ ): """simple docstring""" if "emb" in name: _snake_case : Union[str, Any] = name.replace('''emb''' , '''model.decoder.embed_tokens''' ) if "transformer" in name: _snake_case : int = name.replace('''transformer''' , '''model.decoder''' ) if "cross_attention" in name: _snake_case : List[Any] = name.replace('''cross_attention''' , '''encoder_attn''' ) if "linear1" in name: _snake_case : Dict = name.replace('''linear1''' , '''fc1''' ) if "linear2" in name: _snake_case : Tuple = name.replace('''linear2''' , '''fc2''' ) if "norm1" in name: _snake_case : List[str] = name.replace('''norm1''' , '''self_attn_layer_norm''' ) if "norm_cross" in name: _snake_case : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' ) if "norm2" in name: _snake_case : Dict = name.replace('''norm2''' , '''final_layer_norm''' ) if "out_norm" in name: _snake_case : Dict = name.replace('''out_norm''' , '''model.decoder.layer_norm''' ) if "linears" in name: _snake_case : Union[str, Any] = name.replace('''linears''' , '''lm_heads''' ) if "condition_provider.conditioners.description.output_proj" in name: _snake_case : Optional[Any] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' ) return name def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = list(state_dict.keys() ) _snake_case : Optional[Any] = {} for key in keys: _snake_case : str = state_dict.pop(lowerCAmelCase_ ) _snake_case : List[str] = rename_keys(lowerCAmelCase_ ) if "in_proj_weight" in key: # split fused qkv proj _snake_case : Dict = val[:hidden_size, :] _snake_case : List[Any] = val[hidden_size : 2 * hidden_size, :] _snake_case : Union[str, Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _snake_case : List[str] = val else: _snake_case : Dict = val return state_dict, enc_dec_proj_state_dict def _a ( lowerCAmelCase_ ): """simple docstring""" if checkpoint == "small": # default config values _snake_case : Union[str, Any] = 1_024 _snake_case : Tuple = 24 _snake_case : Union[str, Any] = 16 elif checkpoint == "medium": _snake_case : List[Any] = 1_536 _snake_case : Optional[int] = 48 _snake_case : str = 24 elif checkpoint == "large": _snake_case : Tuple = 2_048 _snake_case : Union[str, Any] = 48 _snake_case : Any = 32 else: raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' ) _snake_case : Tuple = MusicgenDecoderConfig( hidden_size=lowerCAmelCase_ , ffn_dim=hidden_size * 4 , num_hidden_layers=lowerCAmelCase_ , num_attention_heads=lowerCAmelCase_ , ) return config @torch.no_grad() def _a ( lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="cpu" ): """simple docstring""" _snake_case : List[Any] = MusicGen.get_pretrained(lowerCAmelCase_ , device=lowerCAmelCase_ ) _snake_case : Union[str, Any] = decoder_config_from_checkpoint(lowerCAmelCase_ ) _snake_case : 
Optional[int] = fairseq_model.lm.state_dict() _snake_case , _snake_case : str = rename_state_dict( lowerCAmelCase_ , hidden_size=decoder_config.hidden_size ) _snake_case : int = TaEncoderModel.from_pretrained('''t5-base''' ) _snake_case : Optional[Any] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' ) _snake_case : Optional[int] = MusicgenForCausalLM(lowerCAmelCase_ ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _snake_case , _snake_case : Optional[Any] = decoder.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) for key in missing_keys.copy(): if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' ) if len(lowerCAmelCase_ ) > 0: raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' ) # init the composite model _snake_case : List[str] = MusicgenForConditionalGeneration(text_encoder=lowerCAmelCase_ , audio_encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(lowerCAmelCase_ ) # check we can do a forward pass _snake_case : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _snake_case : Optional[int] = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _snake_case : Optional[int] = model(input_ids=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ ).logits if logits.shape != (8, 1, 2_048): raise ValueError('''Incorrect shape for logits''' ) # now construct the processor _snake_case : List[str] = AutoTokenizer.from_pretrained('''t5-base''' ) _snake_case : Optional[int] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' ) _snake_case : Union[str, Any] = MusicgenProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) # set the appropriate bos/pad token ids _snake_case : Tuple = 2_048 _snake_case : List[str] = 2_048 # set other default generation config params _snake_case : List[str] = int(30 * audio_encoder.config.frame_rate ) _snake_case : Optional[Any] = True _snake_case : int = 3.0 if pytorch_dump_folder is not None: Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) if repo_id: logger.info(f'''Pushing model {checkpoint} to {repo_id}''' ) model.push_to_hub(lowerCAmelCase_ ) processor.push_to_hub(lowerCAmelCase_ ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint', default='small', type=str, help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.', ) parser.add_argument( '--pytorch_dump_folder', required=True, default=None, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) parser.add_argument( '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.' ) UpperCAmelCase : List[str] = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
47
'''simple docstring''' from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def _a ( ): """simple docstring""" _snake_case : List[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' ) _snake_case : List[str] = parser.add_subparsers(help='''transformers-cli command helpers''' ) # Register commands ConvertCommand.register_subcommand(lowerCAmelCase_ ) DownloadCommand.register_subcommand(lowerCAmelCase_ ) EnvironmentCommand.register_subcommand(lowerCAmelCase_ ) RunCommand.register_subcommand(lowerCAmelCase_ ) ServeCommand.register_subcommand(lowerCAmelCase_ ) UserCommands.register_subcommand(lowerCAmelCase_ ) AddNewModelCommand.register_subcommand(lowerCAmelCase_ ) AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ ) LfsCommands.register_subcommand(lowerCAmelCase_ ) PTtoTFCommand.register_subcommand(lowerCAmelCase_ ) # Let's go _snake_case : str = parser.parse_args() if not hasattr(lowerCAmelCase_ , '''func''' ): parser.print_help() exit(1 ) # Run _snake_case : Union[str, Any] = args.func(lowerCAmelCase_ ) service.run() if __name__ == "__main__": main()
47
1
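The rename step above slices each fused attention projection (`in_proj_weight`) into separate query/key/value weights by thirds of the row dimension. A minimal sketch of that split, with an illustrative hidden_size of 4:

import torch

hidden_size = 4
fused = torch.randn(3 * hidden_size, hidden_size)  # rows stacked as [q; k; v]

q_proj = fused[:hidden_size, :]                   # first third -> query
k_proj = fused[hidden_size : 2 * hidden_size, :]  # middle third -> key
v_proj = fused[-hidden_size:, :]                  # last third -> value

# re-concatenating the slices recovers the fused matrix
assert torch.equal(torch.cat([q_proj, k_proj, v_proj]), fused)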
'''simple docstring''' from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
47
"""Find the index of the first Fibonacci term to contain n digits."""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers: 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the 1-indexed position of the first Fibonacci term with n digits."""
    answer = 1
    fib = fibonacci_generator()
    while len(str(next(fib))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
47
1
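The Fibonacci solution above counts terms until one reaches n digits. The same computation as a generator-free sketch (the helper name here is illustrative), verified on a small case:

def first_fib_index_with_n_digits(n: int) -> int:
    # 1-indexed position of the first Fibonacci term with n digits,
    # mirroring the indexing convention of solution() above (n >= 2)
    a, b, index = 1, 1, 2  # b holds F(2)
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_fib_index_with_n_digits(3) == 12  # F(12) = 144 is the first 3-digit term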
"""Find the start number below one million that produces the longest Collatz sequence."""


def solution(n: int = 1_000_000) -> int:
    """Return the starting value < n with the longest Collatz chain, memoizing chain lengths."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
47
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor UpperCAmelCase : str = logging.getLogger(__name__) UpperCAmelCase : Dict = 5_0 # max width of layer names UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' ) group.add_argument( '''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _a ( lowerCAmelCase_ ): """simple docstring""" if args.calibrator == "max": _snake_case : Optional[int] = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) _snake_case : Tuple = '''histogram''' elif args.calibrator == "mse": _snake_case : int = '''histogram''' else: raise ValueError(f'''Invalid calibrator {args.calibrator}''' ) _snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ ) _snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(f'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ ) if args.quant_disable: set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ ) if args.quant_disable_keyword: 
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ ) if args.recalibrate_weights: recalibrate_weights(lowerCAmelCase_ ) if args.fuse_qkv: fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ ) if args.clip_gelu: clip_gelu(lowerCAmelCase_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'''{name:80}: {module}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): for mod in [qq, qk, qv]: if not hasattr(lowerCAmelCase_ , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return _snake_case : Tuple = qq._amax.detach().item() _snake_case : Tuple = qk._amax.detach().item() _snake_case : List[Any] = qv._amax.detach().item() _snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) qq._amax.fill_(lowerCAmelCase_ ) qk._amax.fill_(lowerCAmelCase_ ) qv._amax.fill_(lowerCAmelCase_ ) logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): _snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ ) _snake_case : List[str] = mod._input_quantizer._amax.data.detach().item() logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: _snake_case : Dict = mod.weight.shape[0] _snake_case : Optional[int] = mod._weight_quantizer._amax.detach() _snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _a ( lowerCAmelCase_ ): """simple 
docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set _snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach() logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _snake_case : Tuple = amax def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ): """simple docstring""" if ignore is None: _snake_case : Dict = [] elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[int] = [ignore] _snake_case : str = 0 for name, mod in model.named_modules(): if not hasattr(lowerCAmelCase_ , '''weight''' ): continue _snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) for name, mod in model.named_modules(): _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ ) _snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ ) if not hasattr(lowerCAmelCase_ , '''weight''' ): continue if type(lowerCAmelCase_ ) in ignore: continue if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]: continue _snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}''' _snake_case : Any = f'''Wgt:{weight_q.extra_repr()}''' _snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}''' if len(lowerCAmelCase_ ) <= line_width: logger.info(lowerCAmelCase_ ) else: logger.info(f'''{name:{name_width}} {act_str}''' ) logger.info(f'''{" ":{name_width}} {wgt_str}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = 0 for name, mod in model.named_modules(): if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ): print(f'''{name:80} {mod}''' ) count += 1 print(f'''{count} TensorQuantizers found in model''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): 
for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Any = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ )
47
1
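The quantization helpers above toggle quantizers whose module names match a regex (see set_quantizer_by_name). A stripped-down sketch of that matching logic, using a dict of hypothetical module names in place of a real model:

import re

# hypothetical quantizer names -> enabled flag, standing in for model.named_modules()
quantizers = {
    "encoder.layer.0.attention._input_quantizer": True,
    "encoder.layer.11.output._weight_quantizer": True,
    "pooler._input_quantizer": True,
}

def disable_by_keyword(patterns: list[str]) -> None:
    for name in quantizers:
        if any(re.search(p, name) for p in patterns):
            quantizers[name] = False  # stands in for setting mod._disabled = True

disable_by_keyword([r"layer\.\d+\."])
assert quantizers["pooler._input_quantizer"] is True            # no layer index -> untouched
assert quantizers["encoder.layer.11.output._weight_quantizer"] is False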
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() UpperCAmelCase : Dict = logging.get_logger(__name__) UpperCAmelCase : Any = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'adapter_layer': 'encoder.layers.*.adapter_layer', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', 'pooling_layer.linear': 'projector', 'pooling_layer.projection': 'classifier', } UpperCAmelCase : Optional[int] = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'projector', 'classifier', ] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Tuple = {} with open(lowerCAmelCase_ , '''r''' ) as file: for line_number, line in enumerate(lowerCAmelCase_ ): _snake_case : Any = line.strip() if line: _snake_case : List[Any] = line.split() _snake_case : List[Any] = line_number _snake_case : Optional[int] = words[0] _snake_case : int = value return result def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for attribute in key.split('''.''' ): _snake_case : Optional[int] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowerCAmelCase_ ): _snake_case : Optional[int] = PARAM_MAPPING[full_name.split('''.''' )[-1]] _snake_case : Dict = '''param''' if weight_type is not None and weight_type != "param": _snake_case : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape elif weight_type is not None and weight_type == "param": _snake_case : int = hf_pointer for attribute in hf_param_name.split('''.''' ): _snake_case : str = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[int] = shape_pointer.shape # let's reduce dimension _snake_case : Optional[int] = value[0] else: _snake_case : Optional[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": _snake_case : Any = value elif weight_type == "weight_g": _snake_case : Dict = value elif weight_type == "weight_v": _snake_case : Dict = value elif weight_type == "bias": _snake_case : Optional[Any] = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): _snake_case : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[Any] = value else: _snake_case : Tuple = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowerCAmelCase_ ): _snake_case : int = PARAM_MAPPING[full_name.split('''.''' )[-1]] _snake_case : Dict = '''param''' if weight_type is not None and weight_type != "param": _snake_case : str = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": _snake_case : int = '''.'''.join([key, hf_param_name] ) else: _snake_case : List[str] = key _snake_case : Optional[int] = value if '''lm_head''' in full_key else value[0] UpperCAmelCase : Optional[int] = { 'W_a': 'linear_1.weight', 'W_b': 'linear_2.weight', 'b_a': 'linear_1.bias', 'b_b': 'linear_2.bias', 'ln_W': 'norm.weight', 'ln_b': 'norm.bias', } def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ): """simple docstring""" _snake_case : Optional[Any] = False for key, mapped_key in MAPPING.items(): _snake_case : List[Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: _snake_case : Dict = True if "*" in mapped_key: _snake_case : Any = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2] _snake_case : str = mapped_key.replace('''*''' , lowerCAmelCase_ ) if "weight_g" in name: _snake_case : str = '''weight_g''' elif "weight_v" in name: _snake_case : List[Any] = '''weight_v''' elif "bias" in name: _snake_case : List[Any] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj _snake_case : int = '''weight''' else: _snake_case : Optional[int] = None if hf_dict is not None: rename_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return is_used return is_used def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = [] _snake_case : Optional[int] = fairseq_model.state_dict() _snake_case : Optional[int] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): _snake_case : Dict = False if "conv_layers" in name: load_conv_layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == '''group''' , ) _snake_case : Optional[int] = True else: _snake_case : Optional[Any] = load_wavaveca_layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if not is_used: unused_weights.append(lowerCAmelCase_ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : 
List[str] = full_name.split('''conv_layers.''' )[-1] _snake_case : Tuple = name.split('''.''' ) _snake_case : int = int(items[0] ) _snake_case : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) _snake_case : Dict = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) _snake_case : Dict = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) _snake_case : int = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) _snake_case : List[str] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowerCAmelCase_ ) @torch.no_grad() def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=False ): """simple docstring""" if config_path is not None: _snake_case : Dict = WavaVecaConfig.from_pretrained(lowerCAmelCase_ ) else: _snake_case : Tuple = WavaVecaConfig() if is_seq_class: _snake_case : List[str] = read_txt_into_dict(lowerCAmelCase_ ) _snake_case : Optional[Any] = idalabel _snake_case : Dict = WavaVecaForSequenceClassification(lowerCAmelCase_ ) _snake_case : List[Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) feature_extractor.save_pretrained(lowerCAmelCase_ ) elif is_finetuned: if dict_path: _snake_case : int = Dictionary.load(lowerCAmelCase_ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq _snake_case : Optional[int] = target_dict.pad_index _snake_case : Tuple = target_dict.bos_index _snake_case : Dict = target_dict.eos_index _snake_case : Dict = len(target_dict.symbols ) _snake_case : List[Any] = os.path.join(lowerCAmelCase_ , '''vocab.json''' ) if not os.path.isdir(lowerCAmelCase_ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase_ ) ) return os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) _snake_case : Optional[Any] = target_dict.indices # fairseq has the <pad> and <s> switched _snake_case : Any = 0 _snake_case : List[str] = 1 with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : str = 
WavaVecaCTCTokenizer( lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowerCAmelCase_ , ) _snake_case : Any = True if config.feat_extract_norm == '''layer''' else False _snake_case : Any = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , ) _snake_case : Any = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) _snake_case : List[Any] = WavaVecaForCTC(lowerCAmelCase_ ) else: _snake_case : str = WavaVecaForPreTraining(lowerCAmelCase_ ) if is_finetuned or is_seq_class: _snake_case , _snake_case , _snake_case : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: _snake_case : Tuple = argparse.Namespace(task='''audio_pretraining''' ) _snake_case : str = fairseq.tasks.setup_task(lowerCAmelCase_ ) _snake_case , _snake_case , _snake_case : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ ) _snake_case : Any = model[0].eval() recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned ) hf_wavavec.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) parser.add_argument( '--is_seq_class', action='store_true', help='Whether the model to convert is a fine-tuned sequence classification model or not', ) UpperCAmelCase : Optional[int] = parser.parse_args() UpperCAmelCase : Optional[Any] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
47
"""Slowsort: a deliberately inefficient, in-place multiply-and-surrender sort."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort sequence[start..end] in place; the defaults cover the whole list."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
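The wav2vec2 converter recovers the layer index from each fairseq key and substitutes it for the `*` wildcard in the mapped HF name. A minimal sketch of that substitution on one mapping entry (map_key is an illustrative helper, not part of the script):

MAPPING = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

def map_key(name: str) -> str | None:
    for key, mapped in MAPPING.items():
        if key in name:
            layer_index = name.split(key)[0].split(".")[-2]  # "3" in "encoder.layers.3.self_attn.k_proj"
            return mapped.replace("*", layer_index)
    return None

assert map_key("encoder.layers.3.self_attn.k_proj.weight") == "encoder.layers.3.attention.k_proj"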
'''simple docstring''' from __future__ import annotations import math import numpy as np from numpy.linalg import norm def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if dataset.ndim != value_array.ndim: _snake_case : Tuple = ( '''Wrong input data\'s dimensions... ''' f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}''' ) raise ValueError(lowerCAmelCase_ ) try: if dataset.shape[1] != value_array.shape[1]: _snake_case : Union[str, Any] = ( '''Wrong input data\'s shape... ''' f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}''' ) raise ValueError(lowerCAmelCase_ ) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError('''Wrong shape''' ) if dataset.dtype != value_array.dtype: _snake_case : Optional[Any] = ( '''Input data have different datatype... ''' f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}''' ) raise TypeError(lowerCAmelCase_ ) _snake_case : Union[str, Any] = [] for value in value_array: _snake_case : Tuple = euclidean(lowerCAmelCase_ , dataset[0] ) _snake_case : Optional[Any] = dataset[0].tolist() for dataset_value in dataset[1:]: _snake_case : Optional[Any] = euclidean(lowerCAmelCase_ , lowerCAmelCase_ ) if dist > temp_dist: _snake_case : List[Any] = temp_dist _snake_case : str = dataset_value.tolist() answer.append([vector, dist] ) return answer def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return np.dot(lowerCAmelCase_ , lowerCAmelCase_ ) / (norm(lowerCAmelCase_ ) * norm(lowerCAmelCase_ )) if __name__ == "__main__": import doctest doctest.testmod()
47
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id ) _snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits _snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean() _snake_case : Tuple = -(labels.shape[-1] * loss.item()) _snake_case : Union[str, Any] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
47
1
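The similarity-search module above compares vectors by Euclidean distance and cosine similarity. A small numpy sketch of both measures:

import numpy as np
from numpy.linalg import norm

a = np.array([1.0, 0.0, 1.0])
b = np.array([0.0, 1.0, 1.0])

euclidean = np.sqrt(np.sum((a - b) ** 2))    # straight-line distance
cosine = np.dot(a, b) / (norm(a) * norm(b))  # cos of the angle, in [-1, 1]

assert np.isclose(euclidean, np.sqrt(2.0))
assert np.isclose(cosine, 0.5)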
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    """Placeholder raising an informative error when the `note_seq` backend is missing."""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
47
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
47
1
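The dummy class above fails loudly the moment an object is used without its backend installed. A self-contained sketch of that guard pattern; requires_backends here is a simplified stand-in for the library helper of the same name:

import importlib.util

def requires_backends(obj, backends):
    # simplified stand-in for the utils helper: raise if any backend is missing
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    for backend in backends:
        if importlib.util.find_spec(backend) is None:
            raise ImportError(f"{name} requires the `{backend}` package, which is not installed.")

class DummyObject(type):
    # any class-level attribute access re-checks the backend
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)

class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])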
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment 
_snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
47
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
1
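The final property of the SEW-D config above multiplies the conv strides to obtain the feature extractor's overall downsampling factor. The same reduction for the default strides:

import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # one output frame per 320 input samples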
'''simple docstring''' from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance UpperCAmelCase : str = 6_37_81_37.0 UpperCAmelCase : List[Any] = 6_35_67_52.31_42_45 UpperCAmelCase : Union[str, Any] = 6_3_7_8_1_3_7 def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _snake_case : Optional[Any] = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) ) _snake_case : Tuple = atan((1 - flattening) * tan(radians(lowerCAmelCase_ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _snake_case : List[str] = haversine_distance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) / EQUATORIAL_RADIUS # Intermediate P and Q values _snake_case : int = (b_lata + b_lata) / 2 _snake_case : Optional[int] = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _snake_case : Dict = (sin(lowerCAmelCase_ ) ** 2) * (cos(lowerCAmelCase_ ) ** 2) _snake_case : Optional[Any] = cos(sigma / 2 ) ** 2 _snake_case : Optional[Any] = (sigma - sin(lowerCAmelCase_ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _snake_case : Optional[Any] = (cos(lowerCAmelCase_ ) ** 2) * (sin(lowerCAmelCase_ ) ** 2) _snake_case : List[str] = sin(sigma / 2 ) ** 2 _snake_case : Optional[Any] = (sigma + sin(lowerCAmelCase_ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
47
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowerCAmelCase_ ): if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''config.json''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) else: os.makedirs(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Optional[Any] = 2 if unlogit: _snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ ) _snake_case : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) ) for row in range(len(lowerCAmelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ): """simple docstring""" _snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads _snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) if head_mask is None: _snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCAmelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : Dict = None _snake_case : Dict = 0.0 _snake_case : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : List[Any] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += 
head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : Any = 2 _snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(lowerCAmelCase_ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(lowerCAmelCase_ ) logger.info('''Head ranked by importance scores''' ) _snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case : List[Any] = torch.arange( head_importance.numel() , device=args.device ) _snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ ) print_ad_tensor(lowerCAmelCase_ ) return attn_entropy, head_importance, total_loss def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ ) _snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold ) _snake_case : int = torch.ones_like(lowerCAmelCase_ ) _snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case : int = original_score while current_score >= original_score * args.masking_threshold: _snake_case : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case : Dict = float('''Inf''' ) _snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCAmelCase_ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads _snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) _snake_case : Tuple = new_head_mask.view(-1 ) _snake_case : List[str] = 0.0 _snake_case : str = new_head_mask.view_as(lowerCAmelCase_ ) _snake_case : Dict = new_head_mask.clone().detach() print_ad_tensor(lowerCAmelCase_ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case : Any = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : int = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(lowerCAmelCase_ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = datetime.now() _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 
compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : Tuple = 1 / loss _snake_case : Dict = datetime.now() - before_time _snake_case : List[Any] = sum(p.numel() for p in model.parameters() ) _snake_case : int = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) ) } for k, v in heads_to_prune.items(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Union[str, Any] = [ v, ] assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCAmelCase_ ) _snake_case : List[str] = sum(p.numel() for p in model.parameters() ) _snake_case : int = datetime.now() _snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , ) _snake_case : Optional[int] = 1 / loss _snake_case : Dict = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(lowerCAmelCase_ , args.output_dir ) def _a ( ): """simple docstring""" _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 ) parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) _snake_case : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) _snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case : List[str] = torch.device('''cuda''' , args.local_rank ) _snake_case : int = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _snake_case : Optional[int] = nn.parallel.DistributedDataParallel( lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ ) elif args.n_gpu > 1: _snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ ) torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ ) # Prepare dataset _snake_case : Dict = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),) _snake_case : Tuple = TensorDataset(*lowerCAmelCase_ ) _snake_case : List[str] = RandomSampler(lowerCAmelCase_ ) _snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
47
1
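The script in the row above scores and zeroes out attention heads. A minimal sketch of the underlying mechanism, using the standard `head_mask` argument that transformers models accept; the checkpoint name and the masked head index are illustrative:

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
inputs = tokenizer("Heads can be silenced one at a time.", return_tensors="pt")

# head_mask has shape [num_layers, num_heads]; 1.0 keeps a head, 0.0 zeroes it out.
head_mask = torch.ones(model.config.n_layer, model.config.n_head)
head_mask[0, 3] = 0.0  # silence head 3 of layer 0
outputs = model(**inputs, head_mask=head_mask)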
'''simple docstring''' import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase : Tuple = 1_6 UpperCAmelCase : Dict = 3_2 def _a ( lowerCAmelCase_ , lowerCAmelCase_ = 16 ): """simple docstring""" _snake_case : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _snake_case : Any = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(lowerCAmelCase_ ): # max_length=None => use the model max length (it's actually the default) _snake_case : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _snake_case : Optional[Any] = datasets.map( lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _snake_case : Any = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(lowerCAmelCase_ ): # On TPU it's best to pad everything to the same length or training will be very slow. _snake_case : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _snake_case : Tuple = 16 elif accelerator.mixed_precision != "no": _snake_case : Any = 8 else: _snake_case : Dict = None return tokenizer.pad( lowerCAmelCase_ , padding='''longest''' , max_length=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_snake_case : List[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) _snake_case : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCAmelCase : Dict = mocked_dataloaders # noqa: F811 def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowerCAmelCase_ ) == "1": _snake_case : Optional[Any] = 2 # New Code # _snake_case : List[str] = int(args.gradient_accumulation_steps ) # Initialize accelerator _snake_case : List[str] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCAmelCase_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _snake_case : Tuple = config['''lr'''] _snake_case : Optional[int] = int(config['''num_epochs'''] ) _snake_case : Dict = int(config['''seed'''] ) _snake_case : Optional[Any] = int(config['''batch_size'''] ) _snake_case : List[str] = evaluate.load('''glue''' , '''mrpc''' ) set_seed(lowerCAmelCase_ ) _snake_case , _snake_case : Any = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _snake_case : Tuple = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowerCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _snake_case : Optional[Any] = model.to(accelerator.device ) # Instantiate optimizer _snake_case : Any = AdamW(params=model.parameters() , lr=lowerCAmelCase_ ) # Instantiate scheduler _snake_case : List[Any] = get_linear_schedule_with_warmup( optimizer=lowerCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _snake_case , _snake_case , _snake_case , _snake_case , _snake_case : Tuple = accelerator.prepare( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Now we train the model for epoch in range(lowerCAmelCase_ ): model.train() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = model(**lowerCAmelCase_ ) _snake_case : Optional[Any] = output.loss accelerator.backward(lowerCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _snake_case : Union[str, Any] = model(**lowerCAmelCase_ ) _snake_case : List[Any] = outputs.logits.argmax(dim=-1 ) _snake_case , _snake_case : str = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , ) _snake_case : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , lowerCAmelCase_ ) def _a ( ): """simple docstring""" _snake_case : int = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''' , type=lowerCAmelCase_ , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) _snake_case : List[Any] = parser.parse_args() _snake_case : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
47
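A condensed sketch of the accumulation pattern the training loop above relies on, assuming `model`, `optimizer`, and `dataloader` have already been built as in the script:

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for batch in dataloader:
    # Gradients are synchronized (and the wrapped optimizer actually steps)
    # only once every 4 micro-batches; the other iterations just accumulate.
    with accelerator.accumulate(model):
        loss = model(**batch).loss
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()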
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return 0 elif n == 2: return 1 else: _snake_case : Union[str, Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : int = 2 while digits < n: index += 1 _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) ) return index def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" return fibonacci_digits_index(lowerCAmelCase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
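The snippet above computes the index of the first Fibonacci number with a given digit count (Project Euler 25). A readable, self-contained restatement with the obfuscated identifiers replaced by descriptive names of my own:

def fibonacci(n: int) -> int:
    a, b = 0, 1
    for _ in range(n):  # iterative, so large indices stay cheap
        a, b = b, a + b
    return a

def fibonacci_digits_index(n: int) -> int:
    index = 2
    while len(str(fibonacci(index))) < n:
        index += 1
    return index

assert fibonacci_digits_index(3) == 12  # F(12) = 144 is the first 3-digit term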
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : Optional[Any] = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'} class lowerCamelCase (a__ ): _lowercase : Optional[int] = """openai-gpt""" _lowercase : List[Any] = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , lowercase__=40_478 , lowercase__=512 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=1E-5 , lowercase__=0.02 , lowercase__="cls_index" , lowercase__=True , lowercase__=None , lowercase__=True , lowercase__=0.1 , **lowercase__ , ) -> str: """simple docstring""" _snake_case : Optional[int] = vocab_size _snake_case : List[Any] = n_positions _snake_case : List[Any] = n_embd _snake_case : Tuple = n_layer _snake_case : str = n_head _snake_case : str = afn _snake_case : int = resid_pdrop _snake_case : Any = embd_pdrop _snake_case : Optional[int] = attn_pdrop _snake_case : List[str] = layer_norm_epsilon _snake_case : int = initializer_range _snake_case : List[str] = summary_type _snake_case : List[Any] = summary_use_proj _snake_case : Union[str, Any] = summary_activation _snake_case : Dict = summary_first_dropout _snake_case : str = summary_proj_to_labels super().__init__(**lowercase__ )
47
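Typical use of the config class above; the sizes are arbitrary, and the attribute_map shown in the listing is what lets the generic `num_hidden_layers` name resolve to `n_layer`:

from transformers import OpenAIGPTConfig, OpenAIGPTModel

config = OpenAIGPTConfig(n_layer=6, n_head=8, n_embd=256)
model = OpenAIGPTModel(config)     # randomly initialized at the custom sizes
print(config.num_hidden_layers)    # 6, via the attribute_map alias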
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCAmelCase : Any = TypeVar('T') UpperCAmelCase : str = TypeVar('U') class lowerCamelCase (Generic[T, U] ): def __init__( self , lowercase__ , lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : str = key _snake_case : Optional[int] = val _snake_case : DoubleLinkedListNode[T, U] | None = None _snake_case : DoubleLinkedListNode[T, U] | None = None def __repr__( self ) -> str: """simple docstring""" return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class lowerCamelCase (Generic[T, U] ): def __init__( self ) -> None: """simple docstring""" _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case , _snake_case : Union[str, Any] = self.rear, self.head def __repr__( self ) -> str: """simple docstring""" _snake_case : List[Any] = ['''DoubleLinkedList'''] _snake_case : str = self.head while node.next is not None: rep.append(str(lowercase__ ) ) _snake_case : List[str] = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Tuple = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _snake_case : Union[str, Any] = node _snake_case : Optional[Any] = previous _snake_case : int = node _snake_case : Union[str, Any] = self.rear def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None: """simple docstring""" if node.prev is None or node.next is None: return None _snake_case : Optional[int] = node.next _snake_case : Any = node.prev _snake_case : List[str] = None _snake_case : Optional[int] = None return node class lowerCamelCase (Generic[T, U] ): _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList() _snake_case : Union[str, Any] = capacity _snake_case : int = 0 _snake_case : Dict = 0 _snake_case : Union[str, Any] = 0 _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ) -> str: """simple docstring""" return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self , lowercase__ ) -> bool: """simple docstring""" return key in self.cache def UpperCAmelCase_ ( self , lowercase__ ) -> U | None: """simple docstring""" if key in self.cache: self.hits += 1 _snake_case : DoubleLinkedListNode[T, U] = self.cache[key] _snake_case : Tuple = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowercase__ ) return node.val self.miss += 1 return None def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _snake_case : Dict = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowercase__ ) is not None ) # node guaranteed to be in 
list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _snake_case : Optional[Any] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _snake_case : Optional[Any] = value self.list.add(lowercase__ ) @classmethod def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: """simple docstring""" def cache_decorator_inner(lowercase__ ) -> Callable[..., U]: def cache_decorator_wrapper(*lowercase__ ) -> U: if func not in cls.decorator_function_to_instance_map: _snake_case : Optional[Any] = LRUCache(lowercase__ ) _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _snake_case : Tuple = func(*lowercase__ ) cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
47
1
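A sketch of how the class-level decorator above is meant to be used. The identifiers here are mine (the classmethod appears obfuscated as `UpperCAmelCase_` in the listing), and note that it memoizes on `args[0]` only, so it suits single-argument functions:

@LRUCache.decorator(32)          # fixed-capacity cache shared across calls
def fib(num: int) -> int:
    return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)

fib(25)                          # recursion fills the cache as it unwinds
print(fib.cache_info())          # hit/miss counters attached via setattr above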
'''simple docstring''' import math import flax.linen as nn import jax.numpy as jnp def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1.0E4 , lowerCAmelCase_ = False , lowerCAmelCase_ = 1.0 , ): """simple docstring""" assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' _snake_case : Dict = float(embedding_dim // 2 ) _snake_case : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) _snake_case : Optional[Any] = min_timescale * jnp.exp(jnp.arange(lowerCAmelCase_ , dtype=jnp.floataa ) * -log_timescale_increment ) _snake_case : Tuple = jnp.expand_dims(lowerCAmelCase_ , 1 ) * jnp.expand_dims(lowerCAmelCase_ , 0 ) # scale embeddings _snake_case : Union[str, Any] = scale * emb if flip_sin_to_cos: _snake_case : str = jnp.concatenate([jnp.cos(lowerCAmelCase_ ), jnp.sin(lowerCAmelCase_ )] , axis=1 ) else: _snake_case : Union[str, Any] = jnp.concatenate([jnp.sin(lowerCAmelCase_ ), jnp.cos(lowerCAmelCase_ )] , axis=1 ) _snake_case : str = jnp.reshape(lowerCAmelCase_ , [jnp.shape(lowerCAmelCase_ )[0], embedding_dim] ) return signal class lowerCamelCase (nn.Module ): _lowercase : int = 32 _lowercase : jnp.dtype = jnp.floataa @nn.compact def __call__( self , lowercase__ ) -> Any: """simple docstring""" _snake_case : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(lowercase__ ) _snake_case : List[Any] = nn.silu(lowercase__ ) _snake_case : List[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(lowercase__ ) return temb class lowerCamelCase (nn.Module ): _lowercase : int = 32 _lowercase : bool = False _lowercase : float = 1 @nn.compact def __call__( self , lowercase__ ) -> Optional[int]: """simple docstring""" return get_sinusoidal_embeddings( lowercase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
47
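A quick check of the helper above; the class's `__call__` refers to it as `get_sinusoidal_embeddings`, matching upstream diffusers, and the timesteps here are arbitrary:

import jax.numpy as jnp

emb = get_sinusoidal_embeddings(jnp.array([0, 10, 100, 1000]), embedding_dim=8)
print(emb.shape)  # (4, 8): sin features in one half, cos in the other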
'''simple docstring''' import os import numpy import onnx def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = a.name _snake_case : List[Any] = b.name _snake_case : Tuple = '''''' _snake_case : Tuple = '''''' _snake_case : Optional[Any] = a == b _snake_case : List[Any] = name_a _snake_case : str = name_b return res def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ ) _graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for n in graph_proto.node: _node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = list(model.graph.initializer ) _snake_case : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i _snake_case : List[Any] = inits[i].name _snake_case : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Tuple = os.path.dirname(lowerCAmelCase_ ) _snake_case : str = os.path.basename(lowerCAmelCase_ ) _snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) _snake_case : Union[str, Any] = list(model.graph.initializer ) _snake_case : Union[str, Any] = set() _snake_case : Any = {} _snake_case : str = [] _snake_case : Union[str, Any] = 0 for i in range(len(lowerCAmelCase_ ) ): if i in dup_set: continue for j in range(i + 1 , len(lowerCAmelCase_ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(lowerCAmelCase_ ) dup_set.add(lowerCAmelCase_ ) _snake_case : List[Any] = inits[j].data_type _snake_case : Dict = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('''unexpected data type: ''' , lowerCAmelCase_ ) total_reduced_size += mem_size _snake_case : Union[str, Any] = inits[i].name _snake_case : Any = inits[j].name if name_i in dup_map: dup_map[name_i].append(lowerCAmelCase_ ) else: _snake_case : Union[str, Any] = [name_j] ind_to_replace.append((j, i) ) print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' ) _snake_case : List[str] = sorted(lowerCAmelCase_ ) _remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : List[str] = '''optimized_''' + model_file_name _snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) onnx.save(lowerCAmelCase_ , lowerCAmelCase_ ) return new_model
47
1
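The first helper above compares two TensorProtos with their `name` fields temporarily blanked, so only dtype, dims, and raw data decide equality. A small standalone demonstration of that trick (array contents are illustrative):

import numpy as np
from onnx import numpy_helper

a = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w1")
b = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w2")
name_a, name_b = a.name, b.name
a.name = b.name = ""
print(a == b)                    # True: identical payloads despite different names
a.name, b.name = name_a, name_b  # restore the names, exactly as the helper does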
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
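The cross product of AB and AC vanishes exactly when the three points lie on one line, which is what the final function above appears to test. A readable restatement with descriptive names (mine, not the snippet's):

def is_collinear(a, b, c, accuracy: int = 10) -> bool:
    ab = tuple(cb - ca for ca, cb in zip(a, b))
    ac = tuple(cc - ca for ca, cc in zip(a, c))
    cross = (
        ab[1] * ac[2] - ab[2] * ac[1],
        ab[2] * ac[0] - ab[0] * ac[2],  # same as -(ab[0]*ac[2] - ab[2]*ac[0])
        ab[0] * ac[1] - ab[1] * ac[0],
    )
    return all(round(x, accuracy) == 0 for x in cross)

assert is_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))      # same line
assert not is_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3))  # not on one line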
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : int = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
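The `_LazyModule` indirection above defers importing the torch-backed modeling file until one of its symbols is touched. A sketch of the effect, assuming a transformers install with PEGASUS-X available:

from transformers import PegasusXConfig   # resolves through the lazy module

config = PegasusXConfig(d_model=512)
# modeling_pegasus_x itself is only imported once the model class is accessed:
# from transformers import PegasusXForConditionalGeneration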
'''simple docstring''' import re import string import numpy as np import datasets UpperCAmelCase : Optional[Any] = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n' UpperCAmelCase : int = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n' UpperCAmelCase : List[Any] = '\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase (datasets.Metric ): def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , 
id='''sequence''' ), } ) , reference_urls=[] , ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__=None , lowercase__=False , lowercase__=False , lowercase__=False , ) -> str: """simple docstring""" if regexes_to_ignore is not None: for s in regexes_to_ignore: _snake_case : Dict = np.array([re.sub(lowercase__ , '''''' , lowercase__ ) for x in predictions] ) _snake_case : Optional[int] = np.array([re.sub(lowercase__ , '''''' , lowercase__ ) for x in references] ) else: _snake_case : Tuple = np.asarray(lowercase__ ) _snake_case : Dict = np.asarray(lowercase__ ) if ignore_case: _snake_case : Dict = np.char.lower(lowercase__ ) _snake_case : Dict = np.char.lower(lowercase__ ) if ignore_punctuation: _snake_case : Optional[Any] = string.punctuation.maketrans('''''' , '''''' , string.punctuation ) _snake_case : int = np.char.translate(lowercase__ , table=lowercase__ ) _snake_case : Tuple = np.char.translate(lowercase__ , table=lowercase__ ) if ignore_numbers: _snake_case : Any = string.digits.maketrans('''''' , '''''' , string.digits ) _snake_case : Optional[Any] = np.char.translate(lowercase__ , table=lowercase__ ) _snake_case : str = np.char.translate(lowercase__ , table=lowercase__ ) _snake_case : str = predictions == references return {"exact_match": np.mean(lowercase__ ) * 100}
47
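Minimal use of the metric above through the (legacy) `datasets.load_metric` API, mirroring its docstring examples:

import datasets

exact_match = datasets.load_metric("exact_match")
result = exact_match.compute(
    predictions=["the cat", "THEATER"],
    references=["the cat", "theater"],
    ignore_case=True,
)
print(result["exact_match"])  # 100.0 once casing is ignored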
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase : Dict = logging.get_logger(__name__) class lowerCamelCase (a__ ): _lowercase : int = ["""pixel_values"""] def __init__( self , lowercase__ = True , lowercase__ = 32 , lowercase__=PILImageResampling.BILINEAR , lowercase__ = True , **lowercase__ , ) -> None: """simple docstring""" _snake_case : Any = do_resize _snake_case : List[str] = do_rescale _snake_case : Any = size_divisor _snake_case : Optional[Any] = resample super().__init__(**lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray: """simple docstring""" _snake_case , _snake_case : Dict = get_image_size(lowercase__ ) # Rounds the height and width down to the closest multiple of size_divisor _snake_case : Optional[int] = height // size_divisor * size_divisor _snake_case : Dict = width // size_divisor * size_divisor _snake_case : str = resize(lowercase__ , (new_h, new_w) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) return image def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray: """simple docstring""" return rescale(image=lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> BatchFeature: """simple docstring""" _snake_case : Any = do_resize if do_resize is not None else self.do_resize _snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale _snake_case : List[str] = size_divisor if size_divisor is not None else self.size_divisor _snake_case : int = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) _snake_case : Tuple = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. _snake_case : Tuple = [to_numpy_array(lowercase__ ) for img in images] if do_resize: _snake_case : Optional[int] = [self.resize(lowercase__ , size_divisor=lowercase__ , resample=lowercase__ ) for image in images] if do_rescale: _snake_case : Union[str, Any] = [self.rescale(lowercase__ , scale=1 / 255 ) for image in images] _snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] _snake_case : List[str] = {'''pixel_values''': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
47
1
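The resize step above floors each spatial dimension to the nearest multiple of `size_divisor`, which keeps stride-based backbones happy. A tiny worked check of that arithmetic:

height, width, size_divisor = 480, 643, 32
new_h = height // size_divisor * size_divisor   # 480 (already a multiple)
new_w = width // size_divisor * size_divisor    # 640 (floored from 643)
print(new_h, new_w)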
'''simple docstring''' import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowerCAmelCase_ ) _snake_case : Optional[int] = FlaxAutoModelForSeqaSeqLM.from_config(config=lowerCAmelCase_ ) _snake_case : Optional[Any] = checkpoints.load_tax_checkpoint(lowerCAmelCase_ ) _snake_case : str = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": _snake_case : Union[str, Any] = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": _snake_case : Tuple = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _snake_case : int = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): _snake_case : Optional[Any] = f'''layers_{str(lowerCAmelCase_ )}''' # Self-Attention _snake_case : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] _snake_case : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] _snake_case : List[Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] _snake_case : Optional[int] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _snake_case : Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization _snake_case : Dict = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: _snake_case : List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] _snake_case : Dict = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: _snake_case : Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] _snake_case : Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization _snake_case : str = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning _snake_case : int = flax_model.params['''encoder''']['''block'''][str(lowerCAmelCase_ )]['''layer'''] _snake_case : str = tax_attention_key _snake_case : Dict = tax_attention_out _snake_case : Optional[Any] = tax_attention_query _snake_case : int = tax_attention_value _snake_case : List[str] = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _snake_case : Tuple = tax_global_layer_norm if split_mlp_wi: _snake_case : List[Any] = tax_mlp_wi_a _snake_case : Any = tax_mlp_wi_a else: _snake_case : int = tax_mlp_wi _snake_case : Any = tax_mlp_wo _snake_case : Optional[Any] = tax_mlp_layer_norm _snake_case : Optional[Any] = flax_model_encoder_layer_block # Only for layer 0: _snake_case : Optional[Any] = 
tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T _snake_case : List[Any] = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": _snake_case : List[str] = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T _snake_case : str = tax_encoder_global_rel_embedding # Assigning _snake_case : Tuple = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] _snake_case : str = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): _snake_case : Union[str, Any] = f'''layers_{str(lowerCAmelCase_ )}''' # Self-Attention _snake_case : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] _snake_case : Tuple = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] _snake_case : List[str] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] _snake_case : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization _snake_case : Any = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention _snake_case : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] _snake_case : Union[str, Any] = tax_enc_dec_attention_module['''key''']['''kernel'''] _snake_case : Optional[int] = tax_enc_dec_attention_module['''out''']['''kernel'''] _snake_case : Union[str, Any] = tax_enc_dec_attention_module['''query''']['''kernel'''] _snake_case : List[str] = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization _snake_case : int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: _snake_case : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] _snake_case : List[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: _snake_case : Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] _snake_case : Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization _snake_case : Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning _snake_case : Optional[int] = flax_model.params['''decoder''']['''block'''][str(lowerCAmelCase_ )]['''layer'''] _snake_case : Tuple = tax_attention_key _snake_case : Union[str, Any] = tax_attention_out _snake_case : Union[str, Any] = tax_attention_query _snake_case : Tuple = tax_attention_value _snake_case : List[str] = tax_pre_attention_layer_norm _snake_case : List[Any] = tax_enc_dec_attention_key _snake_case : Optional[int] = tax_enc_dec_attention_out _snake_case : List[str] = tax_enc_dec_attention_query _snake_case : List[str] = tax_enc_dec_attention_value _snake_case : List[str] = tax_cross_layer_norm if split_mlp_wi: _snake_case : List[Any] = tax_mlp_wi_a _snake_case : List[Any] = tax_mlp_wi_a else: _snake_case : Any = tax_mlp_wi _snake_case : Dict = tax_mlp_wo _snake_case : int = txa_mlp_layer_norm _snake_case : Any = flax_model_decoder_layer_block # Decoder Normalization 
_snake_case : Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] _snake_case : List[Any] = txa_decoder_norm # Only for layer 0: _snake_case : Dict = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T _snake_case : int = tax_decoder_rel_embedding # Token Embeddings _snake_case : Any = tax_model['''target''']['''token_embedder''']['''embedding'''] _snake_case : Any = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: _snake_case : Union[str, Any] = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(lowerCAmelCase_ ) print('''T5X Model was successfully converted!''' ) if __name__ == "__main__": UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) UpperCAmelCase : List[Any] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
47
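A hypothetical direct call of the converter above, using the entry-point name from its `__main__` block; all three paths and identifiers are placeholders:

convert_tax_checkpoint_to_flax(
    "/ckpts/t5x/checkpoint_100000",   # --t5x_checkpoint_path
    "google/long-t5-local-base",      # --config_name
    "./flax-dump",                    # --flax_dump_folder_path
)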
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : _lowercase : Any = LEDConfig _lowercase : Any = {} _lowercase : Optional[Any] = """gelu""" def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any: """simple docstring""" _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : List[str] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_labels _snake_case : int = vocab_size _snake_case : str = hidden_size _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[int] = max_position_embeddings _snake_case : Any = eos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = bos_token_id _snake_case : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _snake_case : Any = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Dict = tf.concat( [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , ) _snake_case : Dict = global_attention_mask return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder() _snake_case : Union[str, Any] = inputs_dict['''input_ids'''] _snake_case : List[str] = input_ids[:1, :] _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :] _snake_case : Dict = 1 # first forward pass _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ ) _snake_case , _snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : int = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : str = tf.concat( [ 
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowercase : Dict = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : int = True _lowercase : List[Any] = False _lowercase : str = False _lowercase : Union[str, Any] = False def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFLEDModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) _snake_case : Optional[Any] = 2 _snake_case : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _snake_case : Dict = True _snake_case : str = self.model_tester.seq_length _snake_case : Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase__ ): _snake_case : Optional[int] = outputs.decoder_attentions self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase__ ): _snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = model_class(lowercase__ ) _snake_case : Optional[Any] = 
model(self._prepare_for_class(lowercase__ , lowercase__ ) ) _snake_case : List[Any] = len(lowercase__ ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) if self.is_encoder_decoder: _snake_case : Union[str, Any] = model_class(lowercase__ ) _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_decoder_attentions_output(lowercase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : str = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) # Check attention is always last and order is fine _snake_case : int = True _snake_case : List[str] = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) ) self.assertEqual(model.config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def _a ( lowerCAmelCase_ ): """simple docstring""" return tf.constant(lowerCAmelCase_ , dtype=tf.intaa ) UpperCAmelCase : Dict = 1E-4 @slow @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : int = model(**lowercase__ )[0] _snake_case : Dict = (1, 1_024, 768) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : List[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : Tuple = model(**lowercase__ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : Dict = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
1
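A small sketch of how the tests above assemble model inputs, using the `prepare_led_inputs_dict` helper defined mid-listing with a deliberately tiny config; every size here is arbitrary:

import tensorflow as tf
from transformers import LEDConfig

config = LEDConfig(vocab_size=99, d_model=32, attention_window=4, pad_token_id=1)
input_ids = tf.constant([[5, 6, 7, 2]])
decoder_input_ids = tf.constant([[0, 5, 6, 2]])
inputs = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
print(sorted(inputs))  # attention and head masks get filled in automatically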
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : Optional[Any] = {'vocab_file': 'spiece.model'} UpperCAmelCase : Optional[Any] = { 'vocab_file': { 'bert_for_seq_generation': ( 'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model' ), } } UpperCAmelCase : int = {'bert_for_seq_generation': 5_1_2} class lowerCamelCase (a__ ): _lowercase : Tuple = VOCAB_FILES_NAMES _lowercase : Tuple = PRETRAINED_VOCAB_FILES_MAP _lowercase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : List[int] = [] _lowercase : Any = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase__ , lowercase__="<s>" , lowercase__="</s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<::::>" , lowercase__ = None , **lowercase__ , ) -> None: """simple docstring""" _snake_case : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , sep_token=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , **lowercase__ , ) _snake_case : Dict = vocab_file _snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(lowercase__ ) @property def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" return self.sp_model.get_piece_size() def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : str = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: """simple docstring""" _snake_case : Tuple = self.__dict__.copy() _snake_case : Union[str, Any] = None return state def __setstate__( self , lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Tuple = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _snake_case : List[Any] = {} _snake_case : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[str]: """simple docstring""" return self.sp_model.encode(lowercase__ , out_type=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> str: """simple docstring""" return self.sp_model.piece_to_id(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : Dict = self.sp_model.IdToPiece(lowercase__ ) return token def UpperCAmelCase_ ( self , lowercase__ ) -> int: """simple docstring""" _snake_case : Any = [] _snake_case : List[Any] = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowercase__ ) + token _snake_case : str = [] else: current_sub_tokens.append(lowercase__ ) out_string += self.sp_model.decode(lowercase__ ) return out_string.strip() def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowercase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _snake_case : Optional[int] = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + 
VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowercase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowercase__ , '''wb''' ) as fi: _snake_case : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(lowercase__ ) return (out_vocab_file,)
47
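A typical round-trip through the SentencePiece tokenizer above; the checkpoint name comes from the vocab map at the top of that listing, and `BertGenerationTokenizer` is the class's upstream name:

from transformers import BertGenerationTokenizer

tok = BertGenerationTokenizer.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder"
)
ids = tok("sentencepiece round trip").input_ids
print(tok.decode(ids))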
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Any = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } UpperCAmelCase : Optional[Any] = { 'gpt-neox-20b': 2_0_4_8, } class lowerCamelCase (a__ ): _lowercase : Optional[int] = VOCAB_FILES_NAMES _lowercase : str = PRETRAINED_VOCAB_FILES_MAP _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]: """simple docstring""" super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , ) _snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space: _snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) ) _snake_case : int = add_prefix_space _snake_case : Optional[Any] = pre_tok_class(**lowercase__ ) _snake_case : List[str] = add_prefix_space def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" _snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]: """simple docstring""" _snake_case : List[str] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] ) if len(lowercase__ ) > self.model_max_length: _snake_case : Dict = input_ids[-self.model_max_length :] return input_ids
47
1
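The SentencePiece-based tokenizer in the row above is a thin wrapper around the `sentencepiece` library. A minimal standalone sketch of the underlying calls it relies on ("spiece.model" is a placeholder path, not a file shipped with this dataset):

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # placeholder: any trained SentencePiece model file
pieces = sp.encode("Hello world", out_type=str)    # text -> subword pieces
ids = [sp.piece_to_id(piece) for piece in pieces]  # pieces -> vocabulary ids
print(pieces, ids)
print(sp.decode(pieces))  # decodes the pieces back to text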
"""JIT-compiles the MultiScaleDeformableAttention CUDA kernels for Deformable DETR."""
import os
from pathlib import Path


def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]

    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA
47
"""Gamma function evaluated numerically: gamma(z) = integral_0^inf x**(z - 1) * e**(-x) dx."""
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
1
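A quick sanity check for the quadrature-based gamma function in the row above: for positive integers, Gamma(n) = (n - 1)!, so the numerical integral can be compared against math.factorial (uses the reconstructed gamma from that file):

import math

for n in range(1, 7):
    assert math.isclose(gamma(n), math.factorial(n - 1), rel_tol=1e-6)
print(gamma(5))  # ~24.0, i.e. 4!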
'''simple docstring''' import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer UpperCAmelCase : Optional[Any] = logging.getLogger(__name__) def _a ( ): """simple docstring""" _snake_case : int = argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=lowerCAmelCase_ , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=lowerCAmelCase_ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=lowerCAmelCase_ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=lowerCAmelCase_ , default=1_000 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=lowerCAmelCase_ , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=lowerCAmelCase_ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=lowerCAmelCase_ , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) _snake_case : Union[str, Any] = parser.parse_args() return args def _a ( lowerCAmelCase_ ): """simple docstring""" def fn(lowerCAmelCase_ ): return tokenizer(examples['''text'''] ) return fn def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = [] for i in range(len(tokenized_data['''input_ids'''] ) ): _snake_case : Tuple = { '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } _snake_case : List[Any] = tf.train.Features(feature=lowerCAmelCase_ ) _snake_case : str = tf.train.Example(features=lowerCAmelCase_ ) _snake_case : List[str] = example.SerializeToString() records.append(lowerCAmelCase_ ) return records def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: _snake_case : str = min(len(lowerCAmelCase_ ) , args.limit ) _snake_case : Optional[Any] = dataset.select(range(lowerCAmelCase_ ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) _snake_case : int = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. 
if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) _snake_case : Dict = os.path.join(args.output_dir , args.split ) if not os.path.exists(lowerCAmelCase_ ): os.makedirs(lowerCAmelCase_ ) else: _snake_case : Optional[int] = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. _snake_case : List[str] = tokenize_function(lowerCAmelCase_ ) _snake_case : List[str] = dataset.map(lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCAmelCase_ ): # Concatenate all texts. _snake_case : Optional[int] = {k: sum(examples[k] , [] ) for k in examples.keys()} _snake_case : Optional[Any] = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 _snake_case : int = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. _snake_case : int = { k: [t[i : i + args.max_length] for i in range(0 , lowerCAmelCase_ , args.max_length )] for k, t in concatenated_examples.items() } return result _snake_case : Tuple = dataset_tokenized.map(lowerCAmelCase_ , batched=lowerCAmelCase_ , batch_size=1_000 , num_proc=4 ) _snake_case : Tuple = 0 _snake_case : Union[str, Any] = 0 for shard in range(0 , len(lowerCAmelCase_ ) , args.shard_size ): _snake_case : Union[str, Any] = grouped_dataset[shard : shard + args.shard_size] _snake_case : Union[str, Any] = len(dataset_snapshot['''input_ids'''] ) _snake_case : Any = os.path.join(lowerCAmelCase_ , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) _snake_case : int = get_serialized_examples(lowerCAmelCase_ ) with tf.io.TFRecordWriter(lowerCAmelCase_ ) as out_file: for i in range(len(lowerCAmelCase_ ) ): _snake_case : int = serialized_examples[i] out_file.write(lowerCAmelCase_ ) print('''Wrote file {} containing {} records'''.format(lowerCAmelCase_ , lowerCAmelCase_ ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f: print(f'''Total {args.split} records: {total_records}''' , file=lowerCAmelCase_ ) if __name__ == "__main__": UpperCAmelCase : int = parse_args() main(args)
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained( 
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Any = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : str = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) 
self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
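The TFRecord script in the row above serializes "input_ids" and "attention_mask" as int64 lists; reading a shard back is the mirror image. A sketch (the shard filename follows the script's dataset-{shard}-{count}.tfrecord pattern but is hypothetical here):

import tensorflow as tf

feature_spec = {
    "input_ids": tf.io.VarLenFeature(tf.int64),
    "attention_mask": tf.io.VarLenFeature(tf.int64),
}

def parse_record(serialized):
    parsed = tf.io.parse_single_example(serialized, feature_spec)
    return {name: tf.sparse.to_dense(tensor) for name, tensor in parsed.items()}

dataset = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])
for example in dataset.map(parse_record).take(1):
    print(example["input_ids"].shape)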
'''simple docstring''' import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) UpperCAmelCase : List[str] = logging.getLogger() def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Union[str, Any] = '''\n'''.join(lowerCAmelCase_ ) Path(lowerCAmelCase_ ).open('''w''' ).writelines(lowerCAmelCase_ ) UpperCAmelCase : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase : List[Any] = 'sshleifer/tiny-mbart' UpperCAmelCase : List[str] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class lowerCamelCase (a__ ): def UpperCAmelCase_ ( self , lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : Tuple = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _snake_case : Union[str, Any] = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _snake_case : Any = [''' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'''] _dump_articles(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = str(Path(self.get_auto_remove_tmp_dir() ) / '''scores.json''' ) _snake_case : Optional[Any] = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _snake_case : Optional[int] = F''' run_eval_search.py {model} {input_file_name} {output_file_name} --score_path {score_path} --task {task} --num_beams 2 --length_penalty 2.0 '''.split() with patch.object(lowercase__ , '''argv''' , lowercase__ ): run_generate() assert Path(lowercase__ ).exists() # os.remove(Path(output_file_name)) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" self.run_eval_tester(lowercase__ ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" self.run_eval_tester(lowercase__ ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def UpperCAmelCase_ ( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" _snake_case : Dict = Path(self.get_auto_remove_tmp_dir() ) / '''utest_input.source''' _snake_case : Dict = input_file_name.parent / '''utest_output.txt''' assert not output_file_name.exists() _snake_case : List[Any] = { '''en''': ['''Machine learning is great, isn\'t it?''', '''I like to eat bananas''', '''Tomorrow is another great day!'''], '''de''': [ '''Maschinelles Lernen ist großartig, oder?''', '''Ich esse gerne Bananen''', '''Morgen ist wieder ein toller Tag!''', ], } _snake_case : Tuple = Path(self.get_auto_remove_tmp_dir() ) _snake_case : List[Any] = str(tmp_dir / '''scores.json''' ) _snake_case : Tuple = str(tmp_dir / '''val.target''' ) _dump_articles(lowercase__ , text['''en'''] ) _dump_articles(lowercase__ , text['''de'''] ) _snake_case : List[Any] = '''translation_en_to_de''' if model == T5_TINY else '''summarization''' _snake_case : Dict = F''' run_eval_search.py {model} {str(lowercase__ )} {str(lowercase__ )} --score_path {score_path} --reference_path {reference_path} --task {task} '''.split() testargs.extend(['''--search''', '''num_beams=1:2 length_penalty=0.9:1.0'''] ) with patch.object(lowercase__ , '''argv''' , 
lowercase__ ): with CaptureStdout() as cs: run_search() _snake_case : List[str] = [''' num_beams | length_penalty''', model, '''Best score args'''] _snake_case : str = ['''Info'''] if "translation" in task: expected_strings.append('''bleu''' ) else: expected_strings.extend(lowercase__ ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert Path(lowercase__ ).exists() os.remove(Path(lowercase__ ) )
47
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
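The __init__ in the row above registers submodules with _LazyModule so that importing the package stays cheap. A simplified, self-contained sketch of that pattern (not the actual transformers implementation; stdlib modules stand in for the submodules):

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # reverse map: attribute name -> module that defines it
        self._reverse = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._reverse[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value


demo = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(demo.sqrt(9.0))  # "math" is imported only at this point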
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) UpperCAmelCase : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = ['LayoutXLMTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = ['LayoutXLMTokenizerFast'] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , 
from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = 
self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
47
1
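configure_optimizers in the BaseTransformer module above splits parameters into two AdamW groups so that biases and LayerNorm weights receive no weight decay. The same pattern in isolation (the tiny module and hyperparameter values are illustrative only):

import torch
from torch import nn


class Tiny(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)


model = Tiny()
no_decay = ["bias", "LayerNorm.weight"]  # substrings matched against parameter names
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
print([len(g["params"]) for g in optimizer.param_groups])  # [1, 3] for this module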
'''simple docstring''' # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Dict[Optional[str], Type[Formatter]] = {} UpperCAmelCase : Dict[Optional[str], str] = {} UpperCAmelCase : Dict[Optional[str], Exception] = {} def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , ): """simple docstring""" _snake_case : Tuple = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'''Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})''' ) _snake_case : Tuple = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'''Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})''' ) _snake_case : Optional[Any] = format_type def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None ): """simple docstring""" _snake_case : Tuple = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): _snake_case : Dict = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['python']) _register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow']) _register_formatter(NumpyFormatter, 'numpy', aliases=['np']) _register_formatter(PandasFormatter, 'pandas', aliases=['pd']) _register_formatter(CustomFormatter, 'custom') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch']) else: UpperCAmelCase : Tuple = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.') _register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, 'tensorflow', aliases=['tf']) else: UpperCAmelCase : Dict = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.') _register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, 'jax', aliases=[]) else: UpperCAmelCase : Optional[int] = ValueError('JAX needs to be installed to be able to return JAX arrays.') _register_unavailable_formatter(_jax_error, 'jax', aliases=[]) def _a ( lowerCAmelCase_ ): """simple docstring""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def _a ( lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = get_format_type_from_alias(lowerCAmelCase_ ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**lowerCAmelCase_ ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'''Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'''' )
47
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment 
_snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
47
1
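The formatter registry above is two dicts — format types and aliases — with get_formatter resolving the alias first and raising on unknown types. A stripped-down sketch of that double-dict pattern (names are illustrative, not the datasets API):

_TYPES = {}
_ALIASES = {}


def register(formatter_cls, format_type, aliases=()):
    _TYPES[format_type] = formatter_cls
    for alias in set(list(aliases) + [format_type]):
        _ALIASES[alias] = format_type


def get_formatter(format_type, **kwargs):
    canonical = _ALIASES.get(format_type, format_type)
    if canonical not in _TYPES:
        raise ValueError(f"unknown format type {format_type!r}")
    return _TYPES[canonical](**kwargs)


class NumpyFormatter:
    pass


register(NumpyFormatter, "numpy", aliases=["np"])
print(type(get_formatter("np")).__name__)  # NumpyFormatter, resolved via the alias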
"""Bitwise OR of two non-negative integers, returned as a binary string."""


def binary_or(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
47
"""In-place quicksort with a random pivot that also counts comparisons."""
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]  # move the random pivot to the end
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]  # move the random pivot to the end
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
47
1
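Two worked values for binary_or from the first file in this row, cross-checked against Python's built-in | operator:

assert binary_or(25, 32) == bin(25 | 32)  # both '0b111001'
assert binary_or(37, 50) == bin(37 | 50)  # both '0b110111'
print(binary_or(25, 32))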
'''simple docstring''' from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCAmelCase : Any = TypeVar('T') UpperCAmelCase : str = TypeVar('U') class lowerCamelCase (Generic[T, U] ): def __init__( self , lowercase__ , lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : str = key _snake_case : Optional[int] = val _snake_case : DoubleLinkedListNode[T, U] | None = None _snake_case : DoubleLinkedListNode[T, U] | None = None def __repr__( self ) -> str: """simple docstring""" return ( F'''Node: key: {self.key}, val: {self.val}, ''' F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}''' ) class lowerCamelCase (Generic[T, U] ): def __init__( self ) -> None: """simple docstring""" _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ ) _snake_case , _snake_case : Union[str, Any] = self.rear, self.head def __repr__( self ) -> str: """simple docstring""" _snake_case : List[Any] = ['''DoubleLinkedList'''] _snake_case : str = self.head while node.next is not None: rep.append(str(lowercase__ ) ) _snake_case : List[str] = node.next rep.append(str(self.rear ) ) return ",\n ".join(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Tuple = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _snake_case : Union[str, Any] = node _snake_case : Optional[Any] = previous _snake_case : int = node _snake_case : Union[str, Any] = self.rear def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None: """simple docstring""" if node.prev is None or node.next is None: return None _snake_case : Optional[int] = node.next _snake_case : Any = node.prev _snake_case : List[str] = None _snake_case : Optional[int] = None return node class lowerCamelCase (Generic[T, U] ): _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {} def __init__( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList() _snake_case : Union[str, Any] = capacity _snake_case : int = 0 _snake_case : Dict = 0 _snake_case : Union[str, Any] = 0 _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self ) -> str: """simple docstring""" return ( F'''CacheInfo(hits={self.hits}, misses={self.miss}, ''' F'''capacity={self.capacity}, current size={self.num_keys})''' ) def __contains__( self , lowercase__ ) -> bool: """simple docstring""" return key in self.cache def UpperCAmelCase_ ( self , lowercase__ ) -> U | None: """simple docstring""" if key in self.cache: self.hits += 1 _snake_case : DoubleLinkedListNode[T, U] = self.cache[key] _snake_case : Tuple = self.list.remove(self.cache[key] ) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(lowercase__ ) return node.val self.miss += 1 return None def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None: """simple docstring""" if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _snake_case : Dict = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(lowercase__ ) is not None ) # node guaranteed to be in 
list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ ) self.list.add(self.cache[key] ) self.num_keys += 1 else: # bump node to the end of the list, update value _snake_case : Optional[Any] = self.list.remove(self.cache[key] ) assert node is not None # node guaranteed to be in list _snake_case : Optional[Any] = value self.list.add(lowercase__ ) @classmethod def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]: """simple docstring""" def cache_decorator_inner(lowercase__ ) -> Callable[..., U]: def cache_decorator_wrapper(*lowercase__ ) -> U: if func not in cls.decorator_function_to_instance_map: _snake_case : Optional[Any] = LRUCache(lowercase__ ) _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] ) if result is None: _snake_case : Tuple = func(*lowercase__ ) cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ ) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(lowercase__ , '''cache_info''' , lowercase__ ) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
47
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
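The LRU cache class earlier in this row exposes a classmethod decorator for memoising single-argument functions; the class and method names are mangled in this dump (lowerCamelCase / UpperCAmelCase_), but upstream they are LRUCache and decorator. A usage sketch under that assumption:

@LRUCache.decorator(100)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)


print(fib(30))           # 832040, each subproblem computed once
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)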
'''simple docstring''' import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase : List[str] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right UpperCAmelCase : Any = 5_0_0_0_3 UpperCAmelCase : Optional[Any] = 5_0_0_0_2 @require_sentencepiece @require_tokenizers class lowerCamelCase (a__ , unittest.TestCase ): _lowercase : Any = PLBartTokenizer _lowercase : Optional[Any] = None _lowercase : List[str] = False def UpperCAmelCase_ ( self ) -> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _snake_case : Dict = PLBartTokenizer(lowercase__ , language_codes='''base''' , keep_accents=lowercase__ ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : Optional[int] = PLBartTokenizer(lowercase__ , language_codes='''base''' , keep_accents=lowercase__ ) _snake_case : int = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _snake_case : Tuple = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _snake_case : Optional[int] = tokenizer.convert_tokens_to_ids(lowercase__ ) self.assertListEqual( lowercase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(lowercase__ ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) _snake_case : Any = tokenizer.vocab_size _snake_case : int = [tokenizer.convert_ids_to_tokens(lowercase__ ) for x in range(end - 4 , lowercase__ )] self.assertListEqual(lowercase__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] ) _snake_case : Tuple = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go''' _snake_case : Dict = tokenizer(lowercase__ ).input_ids self.assertEqual( tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ ) , lowercase__ , ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Union[str, Any] = PLBartTokenizer(lowercase__ , language_codes='''multi''' , keep_accents=lowercase__ ) _snake_case : Dict = 
tokenizer.tokenize('''This is a test''' ) self.assertListEqual(lowercase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowercase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) _snake_case : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _snake_case : List[Any] = tokenizer.convert_tokens_to_ids(lowercase__ ) self.assertListEqual( lowercase__ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) _snake_case : Any = tokenizer.convert_ids_to_tokens(lowercase__ ) self.assertListEqual( lowercase__ , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) _snake_case : List[Any] = tokenizer.vocab_size _snake_case : Any = [tokenizer.convert_ids_to_tokens(lowercase__ ) for x in range(end - 7 , lowercase__ )] self.assertListEqual( lowercase__ , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] ) _snake_case : Optional[int] = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go''' _snake_case : str = tokenizer(lowercase__ ).input_ids self.assertEqual( tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ ) , lowercase__ , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase (unittest.TestCase ): _lowercase : Union[str, Any] = """uclanlp/plbart-python-en_XX""" _lowercase : Optional[int] = [ """def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])""", """def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""", ] _lowercase : List[Any] = [ """Returns the maximum value of a b c.""", """Sums the values of a b c.""", ] _lowercase : List[Any] = [ 134, 5_452, 33_460, 33_441, 33_463, 33_465, 33_463, 33_449, 988, 20, 33_456, 19, 33_456, 771, 39, 4_258, 889, 3_318, 33_441, 33_463, 33_465, 33_463, 33_449, 2_471, 2, PYTHON_CODE, ] @classmethod def UpperCAmelCase_ ( cls ) -> List[str]: """simple docstring""" _snake_case : PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' ) _snake_case : Optional[Any] = 1 return cls def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , 
lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" self.assertIn(lowercase__ , self.tokenizer.all_special_ids ) _snake_case : Optional[Any] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2] _snake_case : Any = self.tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ ) _snake_case : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase__ ) self.assertEqual(lowercase__ , lowercase__ ) self.assertNotIn(self.tokenizer.eos_token , lowercase__ ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : Tuple = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20] self.assertIsInstance(src_text[0] , lowercase__ ) _snake_case : int = 10 _snake_case : int = self.tokenizer(lowercase__ , max_length=lowercase__ , truncation=lowercase__ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , lowercase__ ) self.assertEqual(len(lowercase__ ) , lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = tempfile.mkdtemp() _snake_case : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowercase__ ) _snake_case : List[Any] = PLBartTokenizer.from_pretrained(lowercase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase__ ) @require_torch def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase__ , return_tensors='''pt''' ) _snake_case : Any = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , lowercase__ ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" _snake_case : List[str] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , ) _snake_case : List[str] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) _snake_case : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowercase__ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Dict = self.tokenizer(self.src_text , padding=lowercase__ , truncation=lowercase__ , max_length=3 , return_tensors='''pt''' ) _snake_case : Tuple = self.tokenizer( text_target=self.tgt_text , padding=lowercase__ , truncation=lowercase__ , max_length=10 , return_tensors='''pt''' ) _snake_case : Optional[int] = targets['''input_ids'''] _snake_case : Optional[Any] = shift_tokens_right(lowercase__ , 
self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : int = self.tokenizer._build_translation_inputs( '''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' ) self.assertEqual( nested_simplify(lowercase__ ) , { # A, test, EOS, en_XX '''input_ids''': [[150, 242, 2, 50_003]], '''attention_mask''': [[1, 1, 1, 1]], # java '''forced_bos_token_id''': 50_001, } , )
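# Illustrative sketch (not part of the test file above) of the convention those
# tests assert: PLBart has no prefix tokens and instead appends [eos, lang_code]
# AFTER the sequence. EOS_ID and PYTHON_CODE below mirror the ids checked above
# (2 and 50_002 for "__python__" in the base vocab) and are assumptions for
# illustration only.
EOS_ID = 2
PYTHON_CODE = 50_002


def add_plbart_suffix(token_ids: list[int]) -> list[int]:
    # prefix stays empty; the source-language code rides at the very end
    return token_ids + [EOS_ID, PYTHON_CODE]


assert add_plbart_suffix([134, 5_452])[-2:] == [EOS_ID, PYTHON_CODE]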
47
"""Entry point for the transformers-cli tool: registers every subcommand and dispatches to it."""
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Dispatch: each register_subcommand wired a `func` factory via set_defaults
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
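# A hedged sketch of the subcommand contract the CLI above depends on: each
# command class exposes register_subcommand(subparsers) and wires a `func`
# factory through set_defaults, so `args.func(args)` builds the command object.
# HelloCommand is an illustrative name, not part of transformers.
from argparse import ArgumentParser


class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("hello")
        sub.add_argument("--name", default="world")
        sub.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello {self.name}")


demo_parser = ArgumentParser("demo")
HelloCommand.register_subcommand(demo_parser.add_subparsers())
demo_args = demo_parser.parse_args(["hello", "--name", "CLI"])
demo_args.func(demo_args).run()  # prints "hello CLI"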
47
1
"""Generate the Harmonic Series 1 + 1/2 + 1/3 + ... + 1/n as a list of term strings."""


def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
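# Quick usage check for harmonic_series above (a sketch, not part of the script):
# the terms come back as strings, so summing them needs a Fraction conversion.
from fractions import Fraction

terms = harmonic_series("4")  # -> ['1', '1/2', '1/3', '1/4']
assert sum(Fraction(t) for t in terms) == Fraction(25, 12)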
47
"""Project Euler 25: index of the first Fibonacci term to contain n digits."""
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    """Return the 1-based index of the first Fibonacci number with ``n`` digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
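# A hedged analytic cross-check for solution(n) above, using Binet's formula:
# F(k) ~ phi**k / sqrt(5), so the first index with n digits is roughly
# ceil((n - 1 + log10(5) / 2) / log10(phi)). Exact agreement for every n is an
# assumption; it does hold for the classic n = 1000 case (index 4782).
from math import ceil, log10, sqrt


def first_fib_index_with_digits(n: int) -> int:
    phi = (1 + sqrt(5)) / 2
    return ceil((n - 1 + log10(5) / 2) / log10(phi))


assert first_fib_index_with_digits(1_000) == 4_782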
47
1
"""Project Euler 89: characters saved by rewriting Roman numerals in minimal form."""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string to an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    """Convert an integer to a minimal-form Roman numeral string."""
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000
    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100
    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10
    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Count the characters saved by rewriting every numeral in the file minimally."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        reduced = generate_roman_numerals(parse_roman_numerals(original))
        savings += len(original) - len(reduced)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
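# Small round-trip check for the two converters above (illustrative only): the
# per-line "saving" counted by solution() is the length difference between a
# numeral and its minimal rewrite.
assert parse_roman_numerals("XIIIIII") == 16
assert generate_roman_numerals(16) == "XVI"
assert len("XIIIIII") - len("XVI") == 4  # four characters saved on this line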
47
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor UpperCAmelCase : str = logging.getLogger(__name__) UpperCAmelCase : Dict = 5_0 # max width of layer names UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' ) group.add_argument( '''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _a ( lowerCAmelCase_ ): """simple docstring""" if args.calibrator == "max": _snake_case : Optional[int] = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) _snake_case : Tuple = '''histogram''' elif args.calibrator == "mse": _snake_case : int = '''histogram''' else: raise ValueError(f'''Invalid calibrator {args.calibrator}''' ) _snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ ) _snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(f'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ ) if args.quant_disable: set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ ) if args.quant_disable_keyword: 
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ ) if args.recalibrate_weights: recalibrate_weights(lowerCAmelCase_ ) if args.fuse_qkv: fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ ) if args.clip_gelu: clip_gelu(lowerCAmelCase_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'''{name:80}: {module}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): for mod in [qq, qk, qv]: if not hasattr(lowerCAmelCase_ , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return _snake_case : Tuple = qq._amax.detach().item() _snake_case : Tuple = qk._amax.detach().item() _snake_case : List[Any] = qv._amax.detach().item() _snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) qq._amax.fill_(lowerCAmelCase_ ) qk._amax.fill_(lowerCAmelCase_ ) qv._amax.fill_(lowerCAmelCase_ ) logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): _snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ ) _snake_case : List[str] = mod._input_quantizer._amax.data.detach().item() logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: _snake_case : Dict = mod.weight.shape[0] _snake_case : Optional[int] = mod._weight_quantizer._amax.detach() _snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _a ( lowerCAmelCase_ ): """simple 
docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set _snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach() logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _snake_case : Tuple = amax def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ): """simple docstring""" if ignore is None: _snake_case : Dict = [] elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[int] = [ignore] _snake_case : str = 0 for name, mod in model.named_modules(): if not hasattr(lowerCAmelCase_ , '''weight''' ): continue _snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) for name, mod in model.named_modules(): _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ ) _snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ ) if not hasattr(lowerCAmelCase_ , '''weight''' ): continue if type(lowerCAmelCase_ ) in ignore: continue if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]: continue _snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}''' _snake_case : Any = f'''Wgt:{weight_q.extra_repr()}''' _snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}''' if len(lowerCAmelCase_ ) <= line_width: logger.info(lowerCAmelCase_ ) else: logger.info(f'''{name:{name_width}} {act_str}''' ) logger.info(f'''{" ":{name_width}} {wgt_str}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = 0 for name, mod in model.named_modules(): if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ): print(f'''{name:80} {mod}''' ) count += 1 print(f'''{count} TensorQuantizers found in model''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): 
for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Any = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ )
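# A minimal sketch of the fuse_qkv idea above with plain torch tensors instead
# of pytorch_quantization modules: the three per-tensor amax scales are replaced
# by their shared maximum so Q, K and V quantize with one scale factor. The
# buffer names are illustrative assumptions.
import torch

q_amax, k_amax, v_amax = torch.tensor(1.7), torch.tensor(2.3), torch.tensor(0.9)
shared = max(q_amax.item(), k_amax.item(), v_amax.item())
for amax in (q_amax, k_amax, v_amax):
    amax.fill_(shared)  # same scale for q/k/v, as fuse_qkv does
assert q_amax.item() == k_amax.item() == v_amax.item()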
47
1
"""Download an Instagram video/IGTV clip via the downloadgram resolver."""
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve the direct video URL via downloadgram and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
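# A hedged streaming variant of the download step above, so large clips are not
# held fully in memory; the endpoint and JSON shape are taken from the script
# and may change without notice.
import requests


def save_video_streaming(video_url: str, file_name: str, chunk_size: int = 1 << 16) -> None:
    with requests.get(video_url, stream=True, timeout=30) as resp:
        resp.raise_for_status()
        with open(file_name, "wb") as fp:
            for chunk in resp.iter_content(chunk_size=chunk_size):
                fp.write(chunk)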
47
"""In-place slowsort, a deliberately inefficient divide-and-surrender sort."""
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """Sort ``sequence[start:end + 1]`` in place."""
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
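# Usage sketch for slowsort above. The algorithm is a deliberate
# "multiply and surrender" joke with worse-than-polynomial running time,
# so keep inputs tiny.
data = [5, 2, 9, 1, 5, 6]
slowsort(data)
assert data == [1, 2, 5, 5, 6, 9]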
47
1
'''simple docstring''' import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : Tuple = ['''a''', '''b''', '''c'''] # Defaults to last layer if both are None _snake_case , _snake_case : Dict = get_aligned_output_features_output_indices(lowercase__ , lowercase__ , lowercase__ ) self.assertEqual(lowercase__ , ['''c'''] ) self.assertEqual(lowercase__ , [2] ) # Out indices set to match out features _snake_case , _snake_case : str = get_aligned_output_features_output_indices(['''a''', '''c'''] , lowercase__ , lowercase__ ) self.assertEqual(lowercase__ , ['''a''', '''c'''] ) self.assertEqual(lowercase__ , [0, 2] ) # Out features set to match out indices _snake_case , _snake_case : Any = get_aligned_output_features_output_indices(lowercase__ , [0, 2] , lowercase__ ) self.assertEqual(lowercase__ , ['''a''', '''c'''] ) self.assertEqual(lowercase__ , [0, 2] ) # Out features selected from negative indices _snake_case , _snake_case : Optional[int] = get_aligned_output_features_output_indices(lowercase__ , [-3, -1] , lowercase__ ) self.assertEqual(lowercase__ , ['''a''', '''c'''] ) self.assertEqual(lowercase__ , [-3, -1] ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" with self.assertRaises(lowercase__ ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , lowercase__ ) # Out features must be a list with self.assertRaises(lowercase__ ): verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] ) # Out features must be a subset of stage names with self.assertRaises(lowercase__ ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] ) # Out indices must be a list or tuple with self.assertRaises(lowercase__ ): verify_out_features_out_indices(lowercase__ , 0 , ['''a''', '''b'''] ) # Out indices must be a subset of stage names with self.assertRaises(lowercase__ ): verify_out_features_out_indices(lowercase__ , (0, 1) , ['''a'''] ) # Out features and out indices must be the same length with self.assertRaises(lowercase__ ): verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] ) # Out features should match out indices with self.assertRaises(lowercase__ ): verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] ) # Out features and out indices should be in order with self.assertRaises(lowercase__ ): verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] ) # Check passes with valid inputs verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : Optional[int] = BackboneMixin() _snake_case : List[Any] = ['''a''', '''b''', '''c'''] _snake_case : Union[str, Any] = ['''a''', '''c'''] _snake_case : Any = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['''a''', '''c'''] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly _snake_case : List[str] = ['''a''', '''b'''] self.assertEqual(backbone.out_features , ['''a''', '''b'''] ) self.assertEqual(backbone.out_indices , [0, 1] ) _snake_case : Dict = [-3, -1] self.assertEqual(backbone.out_features , ['''a''', '''c'''] ) 
self.assertEqual(backbone.out_indices , [-3, -1] )
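# A minimal sketch (an assumption, not the transformers implementation) of the
# alignment rule the tests above verify: out_features and out_indices must name
# the same stages, defaulting to the last stage when both are None.
def align(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]
    if out_features is None:
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)


assert align(None, None, ["a", "b", "c"]) == (["c"], [2])
assert align(None, [0, 2], ["a", "b", "c"]) == (["a", "c"], [0, 2])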
47
"""Slow integration test: FlaxMT5 loss against a known reference score."""
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow

if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9_127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
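# A hedged numpy sketch of what shift_tokens_right does in the test above:
# labels shift one slot to the right and the decoder start token fills
# position 0 (pad-token handling in the real helper is more involved).
import numpy as np


def shift_right_sketch(labels: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    return shifted


assert shift_right_sketch(np.array([[7, 8, 9]]), 0).tolist() == [[0, 7, 8]]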
47
1
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase (a__ , unittest.TestCase ): _lowercase : int = LEDTokenizer _lowercase : List[Any] = LEDTokenizerFast _lowercase : Dict = True def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" super().setUp() _snake_case : Dict = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] _snake_case : str = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) ) _snake_case : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _snake_case : List[Any] = {'''unk_token''': '''<unk>'''} _snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _snake_case : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(lowercase__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(lowercase__ ) ) def UpperCAmelCase_ ( self , **lowercase__ ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def UpperCAmelCase_ ( self , **lowercase__ ) -> List[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" return "lower newer", "lower newer" @cached_property def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' ) @cached_property def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' ) @require_torch def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] _snake_case : Optional[Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Any = tokenizer(lowercase__ , max_length=len(lowercase__ ) , padding=lowercase__ , return_tensors='''pt''' ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) _snake_case : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(lowercase__ , lowercase__ ) @require_torch def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Dict = tokenizer(lowercase__ , padding=lowercase__ , return_tensors='''pt''' ) self.assertIn('''input_ids''' , lowercase__ ) self.assertIn('''attention_mask''' , 
lowercase__ ) self.assertNotIn('''labels''' , lowercase__ ) self.assertNotIn('''decoder_attention_mask''' , lowercase__ ) @require_torch def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Optional[Any] = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Union[str, Any] = tokenizer(text_target=lowercase__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def UpperCAmelCase_ ( self ) -> int: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Optional[Any] = tokenizer( ['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=lowercase__ , truncation=lowercase__ , return_tensors='''pt''' ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" _snake_case : str = ['''A long paragraph for summarization.'''] _snake_case : int = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Optional[Any] = tokenizer(lowercase__ , return_tensors='''pt''' ) _snake_case : Union[str, Any] = tokenizer(text_target=lowercase__ , return_tensors='''pt''' ) _snake_case : Tuple = inputs['''input_ids'''] _snake_case : List[str] = targets['''input_ids'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _snake_case : Union[str, Any] = ['''Summary of the text.''', '''Another summary.'''] _snake_case : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _snake_case : str = tokenizer(lowercase__ , padding=lowercase__ ) _snake_case : str = [[0] * len(lowercase__ ) for x in encoded_output['''input_ids''']] _snake_case : List[str] = tokenizer.pad(lowercase__ ) self.assertSequenceEqual(outputs['''global_attention_mask'''] , lowercase__ ) def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _snake_case : int = self.rust_tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) _snake_case : str = self.tokenizer_class.from_pretrained(lowercase__ , **lowercase__ ) _snake_case : List[Any] = '''A, <mask> AllenNLP sentence.''' _snake_case : Tuple = tokenizer_r.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) _snake_case : Union[str, Any] = tokenizer_p.encode_plus(lowercase__ , add_special_tokens=lowercase__ , return_token_type_ids=lowercase__ ) self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) _snake_case : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) 
_snake_case : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( lowercase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
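# A sketch of the global-attention padding behaviour tested above: LED pads
# `global_attention_mask` with -1 so padded positions take part in neither
# local nor global attention. Plain lists here, purely illustrative.
def pad_global_attention(masks: list[list[int]], pad_value: int = -1) -> list[list[int]]:
    width = max(len(m) for m in masks)
    return [m + [pad_value] * (width - len(m)) for m in masks]


assert pad_global_attention([[0, 0], [0]]) == [[0, 0], [0, -1]]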
47
"""Regression test: an optimizer wrapped by accelerate.Accelerator must stay picklable."""
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
47
1
'''simple docstring''' import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Dict = Vector([1, 2, 3] ) self.assertEqual(x.component(0 ) , 1 ) self.assertEqual(x.component(2 ) , 3 ) _snake_case : Union[str, Any] = Vector() def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Optional[int] = Vector([0, 0, 0, 0, 0, 1] ) self.assertEqual(str(lowercase__ ) , '''(0,0,0,0,0,1)''' ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : List[str] = Vector([1, 2, 3, 4] ) self.assertEqual(len(lowercase__ ) , 4 ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Dict = Vector([1, 2] ) _snake_case : Union[str, Any] = Vector([1, 2, 3, 4, 5] ) _snake_case : Dict = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ) _snake_case : Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5] ) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 ) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 ) self.assertEqual(z.euclidean_length() , 0 ) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Dict = Vector([1, 2, 3] ) _snake_case : int = Vector([1, 1, 1] ) self.assertEqual((x + y).component(0 ) , 2 ) self.assertEqual((x + y).component(1 ) , 3 ) self.assertEqual((x + y).component(2 ) , 4 ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : int = Vector([1, 2, 3] ) _snake_case : Tuple = Vector([1, 1, 1] ) self.assertEqual((x - y).component(0 ) , 0 ) self.assertEqual((x - y).component(1 ) , 1 ) self.assertEqual((x - y).component(2 ) , 2 ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : str = Vector([1, 2, 3] ) _snake_case : Any = Vector([2, -1, 4] ) # for test of dot product _snake_case : str = Vector([1, -2, -1] ) self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' ) self.assertEqual((a * b) , 0 ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : List[Any] = Vector([1, 2, 3] ) _snake_case : Any = Vector([1, 0, 1] ) self.assertEqual(str(axpy(2 , lowercase__ , lowercase__ ) ) , '''(3,4,7)''' ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Optional[int] = Vector([1, 0, 0, 0, 0, 0] ) _snake_case : List[Any] = x.copy() self.assertEqual(str(lowercase__ ) , str(lowercase__ ) ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : List[str] = Vector([1, 0, 0] ) x.change_component(0 , 0 ) x.change_component(1 , 1 ) self.assertEqual(str(lowercase__ ) , '''(0,1,0)''' ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(lowercase__ ) ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Dict = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(minors[x][y] , a.minor(lowercase__ , lowercase__ ) ) def UpperCAmelCase_ ( self ) -> None: 
"""simple docstring""" _snake_case : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : List[Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height() ): for y in range(a.width() ): self.assertEqual(cofactors[x][y] , a.cofactor(lowercase__ , lowercase__ ) ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(-5 , a.determinant() ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : str = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 ) _snake_case : Union[str, Any] = Vector([1, 2, 3] ) self.assertEqual('''(14,32,50)''' , str(a * x ) ) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) a.change_component(0 , 2 , 5 ) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(lowercase__ ) ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) self.assertEqual(7 , a.component(2 , 1 ) , 0.01 ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Dict = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" _snake_case : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 ) _snake_case : Optional[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 ) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) ) def UpperCAmelCase_ ( self ) -> None: """simple docstring""" self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , ) if __name__ == "__main__": unittest.main()
47
"""Check whether three 3D points are collinear via the cross product AB x AC."""
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_pointa: Point3d, end_pointb: Point3d) -> Vector3d:
    """Create the vector pointing from ``end_pointa`` to ``end_pointb``."""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Return the cross product ab x ac."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
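# Usage sketch for are_collinear above: points on the main diagonal are
# collinear; nudging the last point off it breaks collinearity.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
assert not are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 3))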
47
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Union[str, Any] = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> List[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Optional[Any] = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Dict: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : List[str] = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : List[str] = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> List[Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : str = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) 
class lowerCamelCase (metaclass=a__ ): _lowercase : Optional[Any] = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Dict = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Tuple: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : List[Any] = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Optional[int]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : Any = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> str: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : List[str] = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Any: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> int: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> int: """simple docstring""" requires_backends(cls , ['''flax'''] ) class lowerCamelCase (metaclass=a__ ): _lowercase : int = ["""flax"""] def __init__( self , *lowercase__ , **lowercase__ ) -> Union[str, Any]: """simple docstring""" requires_backends(self , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> Optional[int]: """simple docstring""" requires_backends(cls , ['''flax'''] ) @classmethod def UpperCAmelCase_ ( cls , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" requires_backends(cls , ['''flax'''] )
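# A hedged sketch of the dummy-object pattern used by the stubs above: a
# metaclass that raises a helpful ImportError the moment the placeholder class
# is touched. Simplified; the real requires_backends helper checks which
# backends are actually installed.
class DummyMeta(type):
    def __getattr__(cls, name):
        raise ImportError(f"{cls.__name__} requires the flax backend, which is not installed.")


class FlaxThing(metaclass=DummyMeta):
    def __init__(self, *args, **kwargs):
        raise ImportError("FlaxThing requires the flax backend, which is not installed.")


try:
    FlaxThing.from_pretrained("x")  # class-level access trips the metaclass
except ImportError as exc:
    print(exc)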
47
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowerCAmelCase_ ): if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''config.json''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) else: os.makedirs(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Optional[Any] = 2 if unlogit: _snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ ) _snake_case : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) ) for row in range(len(lowerCAmelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ): """simple docstring""" _snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads _snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) if head_mask is None: _snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCAmelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : Dict = None _snake_case : Dict = 0.0 _snake_case : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : List[Any] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += 
head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : Any = 2 _snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(lowerCAmelCase_ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(lowerCAmelCase_ ) logger.info('''Head ranked by importance scores''' ) _snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case : List[Any] = torch.arange( head_importance.numel() , device=args.device ) _snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ ) print_ad_tensor(lowerCAmelCase_ ) return attn_entropy, head_importance, total_loss def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ ) _snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold ) _snake_case : int = torch.ones_like(lowerCAmelCase_ ) _snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case : int = original_score while current_score >= original_score * args.masking_threshold: _snake_case : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case : Dict = float('''Inf''' ) _snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCAmelCase_ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads _snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) _snake_case : Tuple = new_head_mask.view(-1 ) _snake_case : List[str] = 0.0 _snake_case : str = new_head_mask.view_as(lowerCAmelCase_ ) _snake_case : Dict = new_head_mask.clone().detach() print_ad_tensor(lowerCAmelCase_ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case : Any = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : int = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(lowerCAmelCase_ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = datetime.now() _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 
        compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ )
    _snake_case : Tuple = 1 / loss
    _snake_case : Dict = datetime.now() - before_time

    _snake_case : List[Any] = sum(p.numel() for p in model.parameters() )
    _snake_case : int = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) )
    }

    for k, v in heads_to_prune.items():
        if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
            _snake_case : Union[str, Any] = [
                v,
            ]

    assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(lowerCAmelCase_ )
    _snake_case : List[str] = sum(p.numel() for p in model.parameters() )

    _snake_case : int = datetime.now()
    _snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance(
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        compute_entropy=lowerCAmelCase_ ,
        compute_importance=lowerCAmelCase_ ,
        head_mask=lowerCAmelCase_ ,
        actually_pruned=lowerCAmelCase_ ,
    )
    _snake_case : Optional[int] = 1 / loss
    _snake_case : Dict = datetime.now() - before_time

    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        pruned_num_params / original_num_params * 100 ,
    )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 )
    save_model(lowerCAmelCase_ , args.output_dir )


def _a ( ):
    """simple docstring"""
    _snake_case : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        required=lowerCAmelCase_ ,
        help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' ,
    )
    parser.add_argument(
        '''--model_name_or_path''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        required=lowerCAmelCase_ ,
        help='''Path to pretrained model or model identifier from huggingface.co/models''' ,
    )
    parser.add_argument(
        '''--output_dir''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        required=lowerCAmelCase_ ,
        help='''The output directory where the model predictions and checkpoints will be written.''' ,
    )
    # Other parameters
    parser.add_argument(
        '''--config_name''' ,
        default='''''' ,
        type=lowerCAmelCase_ ,
        help='''Pretrained config name or path if not the same as model_name_or_path''' ,
    )
    parser.add_argument(
        '''--tokenizer_name''' ,
        default='''''' ,
        type=lowerCAmelCase_ ,
        help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' ,
    )
    parser.add_argument(
        '''--cache_dir''' ,
        default=lowerCAmelCase_ ,
        type=lowerCAmelCase_ ,
        help='''Where do you want to store the pre-trained models downloaded from s3''' ,
    )
    parser.add_argument(
        '''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.'''
    )
    parser.add_argument(
        '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory'''
    )
    parser.add_argument(
        '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets'''
    )
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers'''
    )
    parser.add_argument(
        '''--dont_normalize_global_importance''' ,
        action='''store_true''' ,
        help='''Don\'t normalize all importance scores between 0 and 1''' ,
    )
    parser.add_argument(
        '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.'''
    )
    parser.add_argument(
        '''--masking_threshold''' ,
        default=0.9 ,
        type=lowerCAmelCase_ ,
        help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' ,
    )
    parser.add_argument(
        '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.'''
    )
    parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' )
    parser.add_argument(
        '''--max_seq_length''' ,
        default=128 ,
        type=lowerCAmelCase_ ,
        help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ) ,
    )
    parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' )
    parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 )
    parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' )
    parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
    parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
    parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' )
    _snake_case : Optional[Any] = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''' )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        _snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
        _snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        _snake_case : List[str] = torch.device('''cuda''' , args.local_rank )
        _snake_case : int = 1
        torch.distributed.init_process_group(backend='''nccl''' )  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )

    _snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )

    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        _snake_case : Optional[int] = nn.parallel.DistributedDataParallel(
            lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ )
    elif args.n_gpu > 1:
        _snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ )

    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ )
    torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) )
    logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ )

    # Prepare dataset
    _snake_case : Dict = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ]
    )
    _snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),)
    _snake_case : Tuple = TensorDataset(*lowerCAmelCase_ )
    _snake_case : List[str] = RandomSampler(lowerCAmelCase_ )
    _snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size )

    # Compute head entropy and importance score
    compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        _snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )


if __name__ == "__main__":
    main()
47
1
'''simple docstring'''

def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : int = [1]
    _snake_case , _snake_case , _snake_case : Union[str, Any] = 0, 0, 0
    _snake_case : List[Any] = ugly_nums[ia] * 2
    _snake_case : Optional[int] = ugly_nums[ia] * 3
    _snake_case : List[str] = ugly_nums[ia] * 5
    for _ in range(1 , lowerCAmelCase_ ):
        _snake_case : List[str] = min(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
        ugly_nums.append(lowerCAmelCase_ )
        if next_num == next_a:
            ia += 1
            _snake_case : Optional[Any] = ugly_nums[ia] * 2
        if next_num == next_a:
            ia += 1
            _snake_case : int = ugly_nums[ia] * 3
        if next_num == next_a:
            ia += 1
            _snake_case : Any = ugly_nums[ia] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(F"""{ugly_numbers(2_0_0) = }""")
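
# Usage sketch for the function above (named `ugly_numbers` in the original,
# un-mutated source). Ugly numbers are those whose only prime factors are
# 2, 3 and 5 -- 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, ... -- so the 10th is 12:
#
#     assert ugly_numbers(10) == 12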
47
'''simple docstring'''

def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        return 0
    elif n == 2:
        return 1
    else:
        _snake_case : Union[str, Any] = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Optional[int] = 0
    _snake_case : int = 2
    while digits < n:
        index += 1
        _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) )
    return index


def _a ( lowerCAmelCase_ = 1_000 ):
    """simple docstring"""
    return fibonacci_digits_index(lowerCAmelCase_ )


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
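
# Worked example for the two helpers above (originally `fibonacci` and
# `fibonacci_digits_index`): F(12) = 144 is the first Fibonacci number with
# three digits, so under the original, un-mutated names:
#
#     assert fibonacci(12) == 144
#     assert fibonacci_digits_index(3) == 12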
47
1
'''simple docstring'''

UpperCAmelCase : Dict = [0, 2, 4, 6, 8]
UpperCAmelCase : List[str] = [1, 3, 5, 7, 9]


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1
    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0
        _snake_case : Union[str, Any] = 0
        for digit in range(10 ):
            _snake_case : Optional[int] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , lowerCAmelCase_ , lowerCAmelCase_ )
        return result
    _snake_case : List[Any] = 0
    for digita in range(10 ):
        _snake_case : Tuple = digita
        if (remainder + digita) % 2 == 0:
            _snake_case : str = ODD_DIGITS
        else:
            _snake_case : Optional[Any] = EVEN_DIGITS
        for digita in other_parity_digits:
            _snake_case : Union[str, Any] = digita
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digita + digita) // 10 , lowerCAmelCase_ , lowerCAmelCase_ , )
    return result


def _a ( lowerCAmelCase_ = 9 ):
    """simple docstring"""
    _snake_case : List[Any] = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(lowerCAmelCase_ , 0 , [0] * length , lowerCAmelCase_ )
    return result


if __name__ == "__main__":
    print(F"""{solution() = }""")
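
# Project Euler 145 context for the solver above: a number n is "reversible"
# when n + reverse(n) consists entirely of odd digits. The problem statement
# gives 120 reversible numbers below one thousand, so under the original name:
#
#     assert solution(3) == 120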
47
'''simple docstring'''

from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

UpperCAmelCase : Any = TypeVar('T')
UpperCAmelCase : str = TypeVar('U')


class lowerCamelCase (Generic[T, U] ):
    def __init__( self , lowercase__ , lowercase__ ) -> List[Any]:
        """simple docstring"""
        _snake_case : str = key
        _snake_case : Optional[int] = val
        _snake_case : DoubleLinkedListNode[T, U] | None = None
        _snake_case : DoubleLinkedListNode[T, U] | None = None

    def __repr__( self ) -> str:
        """simple docstring"""
        return (
            F'''Node: key: {self.key}, val: {self.val}, '''
            F'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
        )


class lowerCamelCase (Generic[T, U] ):
    def __init__( self ) -> None:
        """simple docstring"""
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        _snake_case : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase__ , lowercase__ )
        _snake_case , _snake_case : Union[str, Any] = self.rear, self.head

    def __repr__( self ) -> str:
        """simple docstring"""
        _snake_case : List[Any] = ['''DoubleLinkedList''']
        _snake_case : str = self.head
        while node.next is not None:
            rep.append(str(lowercase__ ) )
            _snake_case : List[str] = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> None:
        """simple docstring"""
        _snake_case : Tuple = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        _snake_case : Union[str, Any] = node
        _snake_case : Optional[Any] = previous
        _snake_case : int = node
        _snake_case : Union[str, Any] = self.rear

    def UpperCAmelCase_ ( self , lowercase__ ) -> DoubleLinkedListNode[T, U] | None:
        """simple docstring"""
        if node.prev is None or node.next is None:
            return None

        _snake_case : Optional[int] = node.next
        _snake_case : Any = node.prev
        _snake_case : List[str] = None
        _snake_case : Optional[int] = None
        return node


class lowerCamelCase (Generic[T, U] ):
    _lowercase : dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__( self , lowercase__ ) -> Union[str, Any]:
        """simple docstring"""
        _snake_case : DoubleLinkedList[T, U] = DoubleLinkedList()
        _snake_case : Union[str, Any] = capacity
        _snake_case : int = 0
        _snake_case : Dict = 0
        _snake_case : Union[str, Any] = 0
        _snake_case : dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__( self ) -> str:
        """simple docstring"""
        return (
            F'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
            F'''capacity={self.capacity}, current size={self.num_keys})'''
        )

    def __contains__( self , lowercase__ ) -> bool:
        """simple docstring"""
        return key in self.cache

    def UpperCAmelCase_ ( self , lowercase__ ) -> U | None:
        """simple docstring"""
        if key in self.cache:
            self.hits += 1
            _snake_case : DoubleLinkedListNode[T, U] = self.cache[key]
            _snake_case : Tuple = self.list.remove(self.cache[key] )
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(lowercase__ )
            return node.val
        self.miss += 1
        return None

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> None:
        """simple docstring"""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                _snake_case : Dict = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(lowercase__ ) is not None
                )  # node guaranteed to be in list
                assert node.key is not None

                del self.cache[first_node.key]
                self.num_keys -= 1
            _snake_case : Optional[int] = DoubleLinkedListNode(lowercase__ , lowercase__ )
            self.list.add(self.cache[key] )
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            _snake_case : Optional[Any] = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            _snake_case : Optional[Any] = value
            self.list.add(lowercase__ )

    @classmethod
    def UpperCAmelCase_ ( cls , lowercase__ = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """simple docstring"""

        def cache_decorator_inner(lowercase__ ) -> Callable[..., U]:
            def cache_decorator_wrapper(*lowercase__ ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    _snake_case : Optional[Any] = LRUCache(lowercase__ )

                _snake_case : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    _snake_case : Tuple = func(*lowercase__ )
                    cls.decorator_function_to_instance_map[func].put(args[0] , lowercase__ )
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(lowercase__ , '''cache_info''' , lowercase__ )  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
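
# Hypothetical usage sketch for the cache above (originally `LRUCache`; the
# classmethod at the end is its `decorator`). It memoizes a one-argument
# function behind a bounded, least-recently-used store:
#
#     @LRUCache.decorator(100)
#     def fib(num):
#         return num if num < 2 else fib(num - 1) + fib(num - 2)
#
#     fib(30)
#     print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, ...)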
47
1
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFCamembertModel


@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase (unittest.TestCase ):
    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        """simple docstring"""
        _snake_case : str = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )

        _snake_case : Optional[int] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]] ,
            dtype=tf.intaa ,
        )  # J'aime le camembert !"

        _snake_case : int = model(lowercase__ )['''last_hidden_state''']
        _snake_case : List[Any] = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , lowercase__ )
        # compare the actual values for a slice.
        _snake_case : int = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] ,
            dtype=tf.floataa ,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
47
'''simple docstring'''

import os

import numpy
import onnx


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : List[Any] = a.name
    _snake_case : List[Any] = b.name
    _snake_case : Tuple = ''''''
    _snake_case : Tuple = ''''''
    _snake_case : Optional[Any] = a == b
    _snake_case : List[Any] = name_a
    _snake_case : str = name_b
    return res


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )
        _graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    for n in graph_proto.node:
        _node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Optional[Any] = list(model.graph.initializer )
    _snake_case : List[str] = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        _snake_case : List[Any] = inits[i].name
        _snake_case : List[str] = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ )


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Tuple = os.path.dirname(lowerCAmelCase_ )
    _snake_case : str = os.path.basename(lowerCAmelCase_ )

    _snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) )

    _snake_case : Union[str, Any] = list(model.graph.initializer )

    _snake_case : Union[str, Any] = set()
    _snake_case : Any = {}
    _snake_case : str = []
    _snake_case : Union[str, Any] = 0
    for i in range(len(lowerCAmelCase_ ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(lowerCAmelCase_ ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(lowerCAmelCase_ )
                dup_set.add(lowerCAmelCase_ )

                _snake_case : List[Any] = inits[j].data_type
                _snake_case : Dict = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print('''unexpected data type: ''' , lowerCAmelCase_ )
                total_reduced_size += mem_size

                _snake_case : Union[str, Any] = inits[i].name
                _snake_case : Any = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(lowerCAmelCase_ )
                else:
                    _snake_case : Union[str, Any] = [name_j]
                ind_to_replace.append((j, i) )

    print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' )

    _snake_case : List[str] = sorted(lowerCAmelCase_ )
    _remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    _snake_case : List[str] = '''optimized_''' + model_file_name
    _snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
    onnx.save(lowerCAmelCase_ , lowerCAmelCase_ )

    return new_model
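
# Sketch of the entry point above (originally `remove_dup_initializers`):
# given a path to an ONNX file it loads the graph, finds initializer tensors
# that compare equal, rewires node inputs to a single surviving copy, and
# saves the result as `optimized_<name>` in the same directory:
#
#     optimized_path = remove_dup_initializers("/path/to/model.onnx")  # hypothetical call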
47
1
'''simple docstring'''

import argparse
import collections
import json
import os
import re
import string
import sys

import numpy as np


UpperCAmelCase : List[str] = re.compile(R'\b(a|an|the)\b', re.UNICODE)
UpperCAmelCase : Dict = None


def _a ( ):
    """simple docstring"""
    _snake_case : int = argparse.ArgumentParser('''Official evaluation script for SQuAD version 2.0.''' )
    parser.add_argument('''data_file''' , metavar='''data.json''' , help='''Input data JSON file.''' )
    parser.add_argument('''pred_file''' , metavar='''pred.json''' , help='''Model predictions.''' )
    parser.add_argument(
        '''--out-file''' , '''-o''' , metavar='''eval.json''' , help='''Write accuracy metrics to file (default is stdout).'''
    )
    parser.add_argument(
        '''--na-prob-file''' , '''-n''' , metavar='''na_prob.json''' , help='''Model estimates of probability of no answer.'''
    )
    parser.add_argument(
        '''--na-prob-thresh''' ,
        '''-t''' ,
        type=lowerCAmelCase_ ,
        default=1.0 ,
        help='''Predict "" if no-answer probability exceeds this (default = 1.0).''' ,
    )
    parser.add_argument(
        '''--out-image-dir''' , '''-p''' , metavar='''out_images''' , default=lowerCAmelCase_ , help='''Save precision-recall curves to directory.'''
    )
    parser.add_argument('''--verbose''' , '''-v''' , action='''store_true''' )
    if len(sys.argv ) == 1:
        parser.print_help()
        sys.exit(1 )
    return parser.parse_args()


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : List[str] = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                _snake_case : Union[str, Any] = bool(qa['''answers''']['''text'''] )
    return qid_to_has_ans


def _a ( lowerCAmelCase_ ):
    """simple docstring"""

    def remove_articles(lowerCAmelCase_ ):
        return ARTICLES_REGEX.sub(''' ''' , lowerCAmelCase_ )

    def white_space_fix(lowerCAmelCase_ ):
        return " ".join(text.split() )

    def remove_punc(lowerCAmelCase_ ):
        _snake_case : List[str] = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(lowerCAmelCase_ ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCAmelCase_ ) ) ) )


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    if not s:
        return []
    return normalize_answer(lowerCAmelCase_ ).split()


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    return int(normalize_answer(lowerCAmelCase_ ) == normalize_answer(lowerCAmelCase_ ) )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Tuple = get_tokens(lowerCAmelCase_ )
    _snake_case : int = get_tokens(lowerCAmelCase_ )
    _snake_case : str = collections.Counter(lowerCAmelCase_ ) & collections.Counter(lowerCAmelCase_ )
    _snake_case : List[Any] = sum(common.values() )
    if len(lowerCAmelCase_ ) == 0 or len(lowerCAmelCase_ ) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks )
    if num_same == 0:
        return 0
    _snake_case : Union[str, Any] = 1.0 * num_same / len(lowerCAmelCase_ )
    _snake_case : Any = 1.0 * num_same / len(lowerCAmelCase_ )
    _snake_case : List[Any] = (2 * precision * recall) / (precision + recall)
    return fa


def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : int = {}
    _snake_case : Optional[int] = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                _snake_case : str = qa['''id''']
                _snake_case : List[Any] = [t for t in qa['''answers''']['''text'''] if normalize_answer(lowerCAmelCase_ )]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    _snake_case : Any = ['''''']
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''' )
                    continue
                _snake_case : Any = preds[qid]
                # Take max over all gold answers
                _snake_case : Union[str, Any] = max(compute_exact(lowerCAmelCase_ , lowerCAmelCase_ ) for a in gold_answers )
                _snake_case : Dict = max(compute_fa(lowerCAmelCase_ , lowerCAmelCase_ ) for a in gold_answers )
    return exact_scores, fa_scores


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : int = {}
    for qid, s in scores.items():
        _snake_case : Any = na_probs[qid] > na_prob_thresh
        if pred_na:
            _snake_case : Optional[Any] = float(not qid_to_has_ans[qid] )
        else:
            _snake_case : int = s
    return new_scores


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None ):
    """simple docstring"""
    if not qid_list:
        _snake_case : List[Any] = len(lowerCAmelCase_ )
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores.values() ) / total),
                ('''f1''', 100.0 * sum(fa_scores.values() ) / total),
                ('''total''', total),
            ]
        )
    else:
        _snake_case : Tuple = len(lowerCAmelCase_ )
        return collections.OrderedDict(
            [
                ('''exact''', 100.0 * sum(exact_scores[k] for k in qid_list ) / total),
                ('''f1''', 100.0 * sum(fa_scores[k] for k in qid_list ) / total),
                ('''total''', total),
            ]
        )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    for k in new_eval:
        _snake_case : Dict = new_eval[k]


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    plt.step(lowerCAmelCase_ , lowerCAmelCase_ , color='''b''' , alpha=0.2 , where='''post''' )
    plt.fill_between(lowerCAmelCase_ , lowerCAmelCase_ , step='''post''' , alpha=0.2 , color='''b''' )
    plt.xlabel('''Recall''' )
    plt.ylabel('''Precision''' )
    plt.xlim([0.0, 1.05] )
    plt.ylim([0.0, 1.05] )
    plt.title(lowerCAmelCase_ )
    plt.savefig(lowerCAmelCase_ )
    plt.clf()


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None ):
    """simple docstring"""
    _snake_case : Union[str, Any] = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : na_probs[k] )
    _snake_case : Any = 0.0
    _snake_case : Optional[Any] = 1.0
    _snake_case : str = 0.0
    _snake_case : List[str] = [1.0]
    _snake_case : str = [0.0]
    _snake_case : Optional[Any] = 0.0
    for i, qid in enumerate(lowerCAmelCase_ ):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        _snake_case : str = true_pos / float(i + 1 )
        _snake_case : List[str] = true_pos / float(lowerCAmelCase_ )
        if i == len(lowerCAmelCase_ ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(lowerCAmelCase_ )
            recalls.append(lowerCAmelCase_ )
    if out_image:
        plot_pr_curve(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    return {"ap": 100.0 * avg_prec}


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    if out_image_dir and not os.path.exists(lowerCAmelCase_ ):
        os.makedirs(lowerCAmelCase_ )
    _snake_case : str = sum(1 for v in qid_to_has_ans.values() if v )
    if num_true_pos == 0:
        return
    _snake_case : List[str] = make_precision_recall_eval(
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        out_image=os.path.join(lowerCAmelCase_ , '''pr_exact.png''' ) ,
        title='''Precision-Recall curve for Exact Match score''' ,
    )
    _snake_case : Any = make_precision_recall_eval(
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        out_image=os.path.join(lowerCAmelCase_ , '''pr_f1.png''' ) ,
        title='''Precision-Recall curve for F1 score''' ,
    )
    _snake_case : Any = {k: float(lowerCAmelCase_ ) for k, v in qid_to_has_ans.items()}
    _snake_case : Optional[int] = make_precision_recall_eval(
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        lowerCAmelCase_ ,
        out_image=os.path.join(lowerCAmelCase_ , '''pr_oracle.png''' ) ,
        title='''Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)''' ,
    )
    merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''pr_exact''' )
    merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''pr_f1''' )
    merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''pr_oracle''' )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    if not qid_list:
        return
    _snake_case : int = [na_probs[k] for k in qid_list]
    _snake_case : List[Any] = np.ones_like(lowerCAmelCase_ ) / float(len(lowerCAmelCase_ ) )
    plt.hist(lowerCAmelCase_ , weights=lowerCAmelCase_ , bins=20 , range=(0.0, 1.0) )
    plt.xlabel('''Model probability of no-answer''' )
    plt.ylabel('''Proportion of dataset''' )
    plt.title(f'''Histogram of no-answer probability: {name}''' )
    plt.savefig(os.path.join(lowerCAmelCase_ , f'''na_prob_hist_{name}.png''' ) )
    plt.clf()


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : Tuple = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
    _snake_case : str = num_no_ans
    _snake_case : int = cur_score
    _snake_case : Union[str, Any] = 0.0
    _snake_case : List[str] = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : na_probs[k] )
    for i, qid in enumerate(lowerCAmelCase_ ):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            _snake_case : Optional[Any] = scores[qid]
        else:
            if preds[qid]:
                _snake_case : Optional[Any] = -1
            else:
                _snake_case : str = 0
        cur_score += diff
        if cur_score > best_score:
            _snake_case : int = cur_score
            _snake_case : int = na_probs[qid]
    return 100.0 * best_score / len(lowerCAmelCase_ ), best_thresh


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case , _snake_case : Dict = find_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    _snake_case , _snake_case : str = find_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    _snake_case : str = best_exact
    _snake_case : Any = exact_thresh
    _snake_case : Union[str, Any] = best_fa
    _snake_case : List[str] = fa_thresh


def _a ( ):
    """simple docstring"""
    with open(OPTS.data_file ) as f:
        _snake_case : str = json.load(lowerCAmelCase_ )
        _snake_case : List[str] = dataset_json['''data''']
    with open(OPTS.pred_file ) as f:
        _snake_case : List[Any] = json.load(lowerCAmelCase_ )
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file ) as f:
            _snake_case : List[Any] = json.load(lowerCAmelCase_ )
    else:
        _snake_case : List[Any] = {k: 0.0 for k in preds}
    _snake_case : int = make_qid_to_has_ans(lowerCAmelCase_ )  # maps qid to True/False
    _snake_case : Optional[int] = [k for k, v in qid_to_has_ans.items() if v]
    _snake_case : Any = [k for k, v in qid_to_has_ans.items() if not v]
    _snake_case , _snake_case : Union[str, Any] = get_raw_scores(lowerCAmelCase_ , lowerCAmelCase_ )
    _snake_case : Union[str, Any] = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh )
    _snake_case : Dict = apply_no_ans_threshold(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.na_prob_thresh )
    _snake_case : Any = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ )
    if has_ans_qids:
        _snake_case : Optional[int] = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_ )
        merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''HasAns''' )
    if no_ans_qids:
        _snake_case : int = make_eval_dict(lowerCAmelCase_ , lowerCAmelCase_ , qid_list=lowerCAmelCase_ )
        merge_eval(lowerCAmelCase_ , lowerCAmelCase_ , '''NoAns''' )
    if OPTS.na_prob_file:
        find_all_best_thresh(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir )
        histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , '''hasAns''' )
        histogram_na_prob(lowerCAmelCase_ , lowerCAmelCase_ , OPTS.out_image_dir , '''noAns''' )
    if OPTS.out_file:
        with open(OPTS.out_file , '''w''' ) as f:
            json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
    else:
        print(json.dumps(lowerCAmelCase_ , indent=2 ) )


if __name__ == "__main__":
    UpperCAmelCase : Any = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
    main()
47
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


UpperCAmelCase : int = {
    'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase : Union[str, Any] = [
        'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PegasusXForConditionalGeneration',
        'PegasusXModel',
        'PegasusXPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pegasus_x import (
            PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
            PegasusXForConditionalGeneration,
            PegasusXModel,
            PegasusXPreTrainedModel,
        )

else:
    import sys

    UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring'''

import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)

UpperCAmelCase : Optional[Any] = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    for attribute in key.split('''.''' ):
        _snake_case : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )

    if weight_type is not None:
        _snake_case : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
    else:
        _snake_case : int = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )

    if weight_type == "weight":
        _snake_case : List[str] = value
    elif weight_type == "weight_g":
        _snake_case : Tuple = value
    elif weight_type == "weight_v":
        _snake_case : str = value
    elif weight_type == "bias":
        _snake_case : Optional[int] = value
    else:
        _snake_case : str = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : int = []
    _snake_case : str = fairseq_model.state_dict()

    _snake_case : Tuple = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        _snake_case : int = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCAmelCase_ ,
                lowerCAmelCase_ ,
                lowerCAmelCase_ ,
                lowerCAmelCase_ ,
                hf_model.config.feat_extract_norm == '''group''' ,
            )
            _snake_case : int = True
        else:
            for key, mapped_key in MAPPING.items():
                _snake_case : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key

                if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
                    _snake_case : Any = True
                    if "*" in mapped_key:
                        _snake_case : List[Any] = name.split(lowerCAmelCase_ )[0].split('''.''' )[-2]
                        _snake_case : str = mapped_key.replace('''*''' , lowerCAmelCase_ )
                    if "weight_g" in name:
                        _snake_case : Tuple = '''weight_g'''
                    elif "weight_v" in name:
                        _snake_case : Tuple = '''weight_v'''
                    elif "weight" in name:
                        _snake_case : str = '''weight'''
                    elif "bias" in name:
                        _snake_case : List[str] = '''bias'''
                    else:
                        _snake_case : Union[str, Any] = None
                    set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                continue
        if not is_used:
            unused_weights.append(lowerCAmelCase_ )

    logger.warning(f'''Unused weights: {unused_weights}''' )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
    """simple docstring"""
    _snake_case : int = full_name.split('''conv_layers.''' )[-1]
    _snake_case : Any = name.split('''.''' )
    _snake_case : str = int(items[0] )
    _snake_case : Dict = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            _snake_case : Any = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            _snake_case : Tuple = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            _snake_case : List[str] = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            _snake_case : List[str] = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(lowerCAmelCase_ )


@torch.no_grad()
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=True ):
    """simple docstring"""
    if config_path is not None:
        _snake_case : List[Any] = HubertConfig.from_pretrained(lowerCAmelCase_ )
    else:
        _snake_case : Dict = HubertConfig()

    if is_finetuned:
        if dict_path:
            _snake_case : Dict = Dictionary.load(lowerCAmelCase_ )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            _snake_case : int = target_dict.pad_index
            _snake_case : List[str] = target_dict.bos_index
            _snake_case : Union[str, Any] = target_dict.eos_index
            _snake_case : Tuple = len(target_dict.symbols )
            _snake_case : Optional[Any] = os.path.join(lowerCAmelCase_ , '''vocab.json''' )
            if not os.path.isdir(lowerCAmelCase_ ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowerCAmelCase_ ) )
                return
            os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
            with open(lowerCAmelCase_ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
                json.dump(target_dict.indices , lowerCAmelCase_ )
            _snake_case : Any = WavaVecaCTCTokenizer(
                lowerCAmelCase_ ,
                unk_token=target_dict.unk_word ,
                pad_token=target_dict.pad_word ,
                bos_token=target_dict.bos_word ,
                eos_token=target_dict.eos_word ,
                word_delimiter_token='''|''' ,
                do_lower_case=lowerCAmelCase_ ,
            )
            _snake_case : Dict = True if config.feat_extract_norm == '''layer''' else False
            _snake_case : str = WavaVecaFeatureExtractor(
                feature_size=1 ,
                sampling_rate=16_000 ,
                padding_value=0 ,
                do_normalize=lowerCAmelCase_ ,
                return_attention_mask=lowerCAmelCase_ ,
            )
            _snake_case : int = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
            processor.save_pretrained(lowerCAmelCase_ )

        _snake_case : List[str] = HubertForCTC(lowerCAmelCase_ )
    else:
        _snake_case : List[Any] = HubertModel(lowerCAmelCase_ )

    if is_finetuned:
        _snake_case , _snake_case , _snake_case : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        _snake_case , _snake_case , _snake_case : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )

    _snake_case : Union[str, Any] = model[0].eval()

    recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )

    hf_wavavec.save_pretrained(lowerCAmelCase_ )


if __name__ == "__main__":
    UpperCAmelCase : str = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    UpperCAmelCase : List[str] = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
47
'''simple docstring'''

from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


UpperCAmelCase : Dict = logging.get_logger(__name__)


class lowerCamelCase (a__ ):
    _lowercase : int = ["""pixel_values"""]

    def __init__( self , lowercase__ = True , lowercase__ = 32 , lowercase__=PILImageResampling.BILINEAR , lowercase__ = True , **lowercase__ , ) -> None:
        """simple docstring"""
        _snake_case : Any = do_resize
        _snake_case : List[str] = do_rescale
        _snake_case : Any = size_divisor
        _snake_case : Optional[Any] = resample
        super().__init__(**lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
        """simple docstring"""
        _snake_case , _snake_case : Dict = get_image_size(lowercase__ )
        # Rounds the height and width down to the closest multiple of size_divisor
        _snake_case : Optional[int] = height // size_divisor * size_divisor
        _snake_case : Dict = width // size_divisor * size_divisor
        _snake_case : str = resize(lowercase__ , (new_h, new_w) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
        return image

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> BatchFeature:
        """simple docstring"""
        _snake_case : Any = do_resize if do_resize is not None else self.do_resize
        _snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
        _snake_case : List[str] = size_divisor if size_divisor is not None else self.size_divisor
        _snake_case : int = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )

        _snake_case : Tuple = make_list_of_images(lowercase__ )
        if not valid_images(lowercase__ ):
            raise ValueError('''Invalid image(s)''' )

        # All transformations expect numpy arrays.
        _snake_case : Tuple = [to_numpy_array(lowercase__ ) for img in images]

        if do_resize:
            _snake_case : Optional[int] = [self.resize(lowercase__ , size_divisor=lowercase__ , resample=lowercase__ ) for image in images]

        if do_rescale:
            _snake_case : Union[str, Any] = [self.rescale(lowercase__ , scale=1 / 255 ) for image in images]

        _snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]

        _snake_case : List[str] = {'''pixel_values''': images}
        return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
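
# Behaviour sketch for the `resize` step above: with size_divisor=32, a
# 100x150 image is scaled down to 96x128, since each side is rounded down
# to the nearest multiple of 32 (100 // 32 * 32 == 96, 150 // 32 * 32 == 128).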
47
1
'''simple docstring'''

from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
47
'''simple docstring'''

from __future__ import annotations

import unittest

from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFLEDForConditionalGeneration, TFLEDModel


@require_tf
class lowerCamelCase :
    _lowercase : Any = LEDConfig
    _lowercase : Any = {}
    _lowercase : Optional[Any] = """gelu"""

    def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any:
        """simple docstring"""
        _snake_case : Dict = parent
        _snake_case : Any = batch_size
        _snake_case : List[str] = seq_length
        _snake_case : Union[str, Any] = is_training
        _snake_case : Tuple = use_labels
        _snake_case : int = vocab_size
        _snake_case : str = hidden_size
        _snake_case : Optional[Any] = num_hidden_layers
        _snake_case : List[Any] = num_attention_heads
        _snake_case : Optional[int] = intermediate_size
        _snake_case : List[Any] = hidden_dropout_prob
        _snake_case : List[str] = attention_probs_dropout_prob
        _snake_case : Optional[int] = max_position_embeddings
        _snake_case : Any = eos_token_id
        _snake_case : List[Any] = pad_token_id
        _snake_case : Optional[int] = bos_token_id
        _snake_case : Any = attention_window

        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        _snake_case : Any = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        _snake_case : Tuple = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        """simple docstring"""
        _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )

        _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        _snake_case : List[Any] = self.config_cls(
            vocab_size=self.vocab_size ,
            d_model=self.hidden_size ,
            encoder_layers=self.num_hidden_layers ,
            decoder_layers=self.num_hidden_layers ,
            encoder_attention_heads=self.num_attention_heads ,
            decoder_attention_heads=self.num_attention_heads ,
            encoder_ffn_dim=self.intermediate_size ,
            decoder_ffn_dim=self.intermediate_size ,
            dropout=self.hidden_dropout_prob ,
            attention_dropout=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            eos_token_ids=[2] ,
            bos_token_id=self.bos_token_id ,
            pad_token_id=self.pad_token_id ,
            decoder_start_token_id=self.pad_token_id ,
            attention_window=self.attention_window ,
            **self.config_updates ,
        )
        _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
        _snake_case : Dict = tf.concat(
            [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] ,
            axis=-1 ,
        )
        _snake_case : Dict = global_attention_mask
        return config, inputs_dict

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int:
        """simple docstring"""
        _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder()
        _snake_case : Union[str, Any] = inputs_dict['''input_ids''']

        _snake_case : List[str] = input_ids[:1, :]
        _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :]
        _snake_case : Dict = 1

        # first forward pass
        _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ )

        _snake_case , _snake_case : Dict = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 )
        _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0]
        _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        _snake_case : int = output_from_no_past[:, -3:, random_slice_idx]
        _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 )


def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ):
    """simple docstring"""
    if attention_mask is None:
        _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        _snake_case : str = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] ,
            axis=-1 ,
        )
    if head_mask is None:
        _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        _snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": decoder_input_ids,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
    }


@require_tf
class lowerCamelCase (a__ , a__ , unittest.TestCase ):
    _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : Dict = (
        {
            """conversational""": TFLEDForConditionalGeneration,
            """feature-extraction""": TFLEDModel,
            """summarization""": TFLEDForConditionalGeneration,
            """text2text-generation""": TFLEDForConditionalGeneration,
            """translation""": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : int = True
    _lowercase : List[Any] = False
    _lowercase : str = False
    _lowercase : Union[str, Any] = False

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        """simple docstring"""
        _snake_case : str = TFLEDModelTester(self )
        _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ )

    def UpperCAmelCase_ ( self ) -> Tuple:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def UpperCAmelCase_ ( self ) -> List[str]:
        """simple docstring"""
        _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ )

    def UpperCAmelCase_ ( self ) -> int:
        """simple docstring"""
        _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()

        _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] )
        _snake_case : Optional[Any] = 2
        _snake_case : Any = tf.where(
            tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices ,
            1 ,
            inputs_dict['''global_attention_mask'''] ,
        )

        _snake_case : Dict = True
        _snake_case : str = self.model_tester.seq_length
        _snake_case : Dict = self.model_tester.encoder_seq_length

        def check_decoder_attentions_output(lowercase__ ):
            _snake_case : Optional[int] = outputs.decoder_attentions
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, seq_length, seq_length] ,
            )

        def check_encoder_attentions_output(lowercase__ ):
            _snake_case : int = [t.numpy() for t in outputs.encoder_attentions]
            _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions]
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, seq_length, seq_length] ,
            )
            self.assertListEqual(
                list(global_attentions[0].shape[-3:] ) ,
                [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] ,
            )

        for model_class in self.all_model_classes:
            _snake_case : Union[str, Any] = True
            _snake_case : Dict = False
            _snake_case : Union[str, Any] = False
            _snake_case : List[Any] = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            _snake_case : List[Any] = len(lowercase__ )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

            if self.is_encoder_decoder:
                _snake_case : Union[str, Any] = model_class(lowercase__ )
                _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
                self.assertEqual(config.output_hidden_states , lowercase__ )
                check_decoder_attentions_output(lowercase__ )

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            _snake_case : str = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            self.assertEqual(config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

            # Check attention is always last and order is fine
            _snake_case : int = True
            _snake_case : List[str] = True
            _snake_case : Tuple = model_class(lowercase__ )
            _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) )
            self.assertEqual(model.config.output_hidden_states , lowercase__ )
            check_encoder_attentions_output(lowercase__ )

    @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
    def UpperCAmelCase_ ( self ) -> int:
        """simple docstring"""
        pass

    def UpperCAmelCase_ ( self ) -> str:
        """simple docstring"""
        pass


def _a ( lowerCAmelCase_ ):
    """simple docstring"""
    return tf.constant(lowerCAmelCase_ , dtype=tf.intaa )


UpperCAmelCase : Dict = 1E-4


@slow
@require_tf
class lowerCamelCase (unittest.TestCase ):
    def UpperCAmelCase_ ( self ) -> Dict:
        """simple docstring"""
        _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
        # change to intended input here
        _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
        _snake_case : int = model(**lowercase__ )[0]
        _snake_case : Dict = (1, 1_024, 768)
        self.assertEqual(output.shape , lowercase__ )
        # change to expected output here
        _snake_case : List[Any] = tf.convert_to_tensor(
            [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] ,
        )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 )

    def UpperCAmelCase_ ( self ) -> List[Any]:
        """simple docstring"""
        _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
        # change to intended input here
        _snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
        _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ )
        _snake_case : Tuple = model(**lowercase__ )[0]
        _snake_case : Any = (1, 1_024, model.config.vocab_size)
        self.assertEqual(output.shape , lowercase__ )
        # change to expected output here
        _snake_case : Dict = tf.convert_to_tensor(
            [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] ,
        )
        tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
47
1
'''simple docstring'''

def _a ( lowerCAmelCase_ = 1 , lowerCAmelCase_ = 1_000 ):
    """simple docstring"""
    _snake_case : int = 1
    _snake_case : List[str] = 0
    for divide_by_number in range(lowerCAmelCase_ , digit + 1 ):
        _snake_case : list[int] = []
        _snake_case : Optional[Any] = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(lowerCAmelCase_ ):
                    _snake_case : Dict = len(lowerCAmelCase_ )
                    _snake_case : List[Any] = divide_by_number
            else:
                has_been_divided.append(lowerCAmelCase_ )
                _snake_case : Any = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
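
# Worked example for the solver above (Project Euler 26 style): among the
# unit fractions 1/d for d <= 10, 1/7 has the longest recurring cycle,
# 0.(142857), of length 6, so under the original name:
#
#     assert solution(1, 10) == 7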
47
'''simple docstring'''

import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

UpperCAmelCase : Optional[int] = logging.get_logger(__name__)

UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

UpperCAmelCase : Any = {
    'tokenizer_file': {
        'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json',
    },
}

UpperCAmelCase : Optional[Any] = {
    'gpt-neox-20b': 2_0_4_8,
}


class lowerCamelCase (a__ ):
    _lowercase : Optional[int] = VOCAB_FILES_NAMES
    _lowercase : str = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""]

    def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]:
        """simple docstring"""
        super().__init__(
            lowercase__ ,
            lowercase__ ,
            tokenizer_file=lowercase__ ,
            unk_token=lowercase__ ,
            bos_token=lowercase__ ,
            eos_token=lowercase__ ,
            add_prefix_space=lowercase__ ,
            **lowercase__ ,
        )

        _snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space:
            _snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) )
            _snake_case : int = add_prefix_space
            _snake_case : Optional[Any] = pre_tok_class(**lowercase__ )

        _snake_case : List[str] = add_prefix_space

    def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]:
        """simple docstring"""
        _snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
        return tuple(lowercase__ )

    def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]:
        """simple docstring"""
        _snake_case : List[str] = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] )

        if len(lowercase__ ) > self.model_max_length:
            _snake_case : Dict = input_ids[-self.model_max_length :]
        return input_ids
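
# Usage sketch: in the un-mutated transformers source this class is
# GPTNeoXTokenizerFast, loadable from the checkpoint referenced above:
#
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tok("hello world").input_ids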
47
1
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None

        self.N: int = len(arr)
        # Leaves live at indices N..2N-1; internal nodes at 1..N-1 (index 0 unused).
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) range against a plain reduce over the array."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
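Both update and query above run in O(log n): each walks a single leaf-to-root path of the implicit binary tree. A minimal point-update example, separate from the exhaustive test harness in the file (values verified by hand):

# Quick demo of the iterative segment tree above.
st = SegmentTree([5, 2, 8, 1], min)  # leaves stored at indices N..2N-1
assert st.query(0, 3) == 1           # min over the whole array
st.update(3, 9)                      # point-assign arr[3] = 9 in O(log n)
assert st.query(2, 3) == 8           # min of [8, 9]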
47
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the Gamma function by numerically integrating
    x**(num - 1) * exp(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
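Since Gamma(n) = (n - 1)! for positive integers, two cheap correctness checks for gamma() above; a sketch, with tolerances chosen loosely for the numeric quadrature:

import math

assert math.isclose(gamma(3), 2.0, rel_tol=1e-6)   # Gamma(3) = 2! = 2
assert math.isclose(gamma(5), 24.0, rel_tol=1e-6)  # Gamma(5) = 4! = 24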
47
1
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Reproduces a per-sequence score computed with the original T5 MTF codebase.
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # The loss is a mean over label tokens; multiplying by the label length
        # recovers the summed negative log-likelihood of the target sequence.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained( 
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Any = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : str = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) 
self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : Tuple = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[str] = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=lowercase__ ) , 14_410 )
47
1
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Return the Jaccard similarity |A intersect B| / |A union B| of two
    collections. With ``alternative_union=True`` the denominator is
    |A| + |B| instead."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # Build the union while preserving order and duplicates from set_a.
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None


if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
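Worked numbers for the two modes of jaccard_similarity() above: the example sets share 3 elements, their union has 8, and |A| + |B| = 11, so:

a = {"a", "b", "c", "d", "e"}
b = {"c", "d", "e", "f", "h", "i"}
assert jaccard_similarity(a, b) == 3 / 8                          # standard index, 0.375
assert jaccard_similarity(a, b, alternative_union=True) == 3 / 11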
47
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]

if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
1
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute the Gamma function by numerically integrating
    x**(num - 1) * exp(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , 
from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = 
self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[] , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ , ): """simple docstring""" pl.seed_everything(args.seed ) # init model _snake_case : Union[str, Any] = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowerCAmelCase_ ) # add custom checkpoints if checkpoint_callback is None: _snake_case : Any = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowerCAmelCase_ ) if logging_callback is None: _snake_case : str = LoggingCallback() _snake_case : Tuple = {} if args.fpaa: _snake_case : Union[str, Any] = 16 if args.gpus > 1: _snake_case : Optional[Any] = '''auto''' _snake_case : Tuple = '''ddp''' _snake_case : Optional[Any] = args.accumulate_grad_batches _snake_case : Tuple = None _snake_case : str = '''auto''' _snake_case : int = pl.Trainer.from_argparse_args( lowerCAmelCase_ , weights_summary=lowerCAmelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase_ , ) if args.do_train: trainer.fit(lowerCAmelCase_ ) else: print('''RAG modeling tests with new set functions successfuly executed!''' ) return trainer
47
1
import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
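An illustration of how the underlying version-check helper behaves at runtime (the requirement strings below are hypothetical; require_version raises ImportError, with the optional hint appended, when the installed distribution violates the specifier):

from transformers.utils.versions import require_version

require_version("tqdm>=4.27")  # passes silently when the installed version satisfies the specifier
try:
    require_version("tqdm>9000", "try: pip install -U tqdm")  # deliberately impossible
except ImportError as err:
    print(err)  # the error message ends with the hint above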
47
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment 
_snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
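The functools.reduce over conv_stride at the end of the config computes the total downsampling factor of the convolutional feature encoder; with the default strides above it comes to 5 * 2**6 = 320 input samples per encoded frame, i.e. 20 ms of audio at the 16 kHz rate these checkpoints assume. This appears to correspond to the inputs_to_logits_ratio property of the upstream SEW-D config. A standalone check:

import functools
import operator

# Default conv_stride from the config above; the product is the hop size
# of the feature encoder measured in input samples.
conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
hop = functools.reduce(operator.mul, conv_stride, 1)
assert hop == 320
print(hop / 16_000, "seconds of audio per frame")  # 0.02 at 16 kHz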
47
1
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", List["PIL.Image.Image"], np.ndarray, List[np.ndarray]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
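The resize step above rounds both spatial dimensions down to the nearest multiple of size_divisor, which keeps downstream stride-32 feature maps integer-sized. A standalone check of the floor-rounding, with hypothetical image sizes:

# Floor-rounding used by the resize step, shown in isolation.
def round_down(value: int, divisor: int) -> int:
    return value // divisor * divisor

assert round_down(481, 32) == 480  # 481 -> 15 * 32
assert round_down(640, 32) == 640  # already a multiple, unchanged
assert round_down(31, 32) == 0     # degenerate case: inputs smaller than the divisor collapse to 0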
47
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current value is less than the pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print("No of Comparisons for 100 elements selected from a standard normal distribution is:")
print(z)
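A small deterministic check of the sort itself; the comparison count varies between runs because both routines draw random pivots, but the array must always come out sorted:

import numpy as np

data = np.array([9, 3, 7, 1, 8, 2])
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
assert list(data) == [1, 2, 3, 7, 8, 9]
print("comparisons used:", comparisons)  # run-dependent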
47
1
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack: list[str] = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
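Representative cases for the stack-based checker above:

assert is_balanced("{[()]}")     # properly nested
assert is_balanced("")           # vacuously balanced
assert not is_balanced("{[)]}")  # ']' expected but ')' found
assert not is_balanced("(")      # opener never closed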
47
'''simple docstring''' from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
47
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
47
from argparse import ArgumentParser

from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands


def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
47
1
'''simple docstring''' import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip UpperCAmelCase : List[str] = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def _a ( lowerCAmelCase_ ): """simple docstring""" if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return max(metric_fn(lowerCAmelCase_ , lowerCAmelCase_ ) for gt in ground_truths ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = [line.strip() for line in open(lowerCAmelCase_ , '''r''' ).readlines()] _snake_case : int = [] if args.gold_data_mode == "qa": _snake_case : Tuple = pd.read_csv(lowerCAmelCase_ , sep='''\t''' , header=lowerCAmelCase_ ) for answer_list in data[1]: _snake_case : Any = ast.literal_eval(lowerCAmelCase_ ) answers.append(lowerCAmelCase_ ) else: _snake_case : Optional[int] = [line.strip() for line in open(lowerCAmelCase_ , '''r''' ).readlines()] _snake_case : List[Any] = [[reference] for reference in references] _snake_case : str = 0 for prediction, ground_truths in zip(lowerCAmelCase_ , lowerCAmelCase_ ): total += 1 em += metric_max_over_ground_truths(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) fa += metric_max_over_ground_truths(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = 100.0 * em / total _snake_case : Dict = 100.0 * fa / total logger.info(f'''F1: {fa:.2f}''' ) logger.info(f'''EM: {em:.2f}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : int = args.k _snake_case : List[str] = [line.strip() for line in open(lowerCAmelCase_ , '''r''' ).readlines()] _snake_case : Dict = [line.strip() for line in open(lowerCAmelCase_ , '''r''' ).readlines()] _snake_case : str = 0 for hypo, reference in zip(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Union[str, Any] = set(hypo.split('''\t''' )[:k] ) _snake_case : int = set(reference.split('''\t''' ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _snake_case : str = 100.0 * em / total logger.info(f'''Precision@{k}: {em: .2f}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def strip_title(lowerCAmelCase_ ): if title.startswith('''"''' ): _snake_case : int = title[1:] if title.endswith('''"''' ): _snake_case : Optional[Any] = title[:-1] return title _snake_case : List[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase_ , return_tensors='''pt''' , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , )['''input_ids'''].to(args.device ) _snake_case : Any = rag_model.rag.question_encoder(lowerCAmelCase_ ) _snake_case : Any = question_enc_outputs[0] _snake_case : Optional[Any] = rag_model.retriever( lowerCAmelCase_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , 
return_tensors='''pt''' , ) _snake_case : Any = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _snake_case : Any = [] for docs in all_docs: _snake_case : Optional[int] = [strip_title(lowerCAmelCase_ ) for title in docs['''title''']] provenance_strings.append('''\t'''.join(lowerCAmelCase_ ) ) return provenance_strings def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" with torch.no_grad(): _snake_case : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( lowerCAmelCase_ , return_tensors='''pt''' , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ ) _snake_case : Tuple = inputs_dict.input_ids.to(args.device ) _snake_case : int = inputs_dict.attention_mask.to(args.device ) _snake_case : int = rag_model.generate( # rag_model overwrites generate lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=lowerCAmelCase_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _snake_case : Union[str, Any] = rag_model.retriever.generator_tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) if args.print_predictions: for q, a in zip(lowerCAmelCase_ , lowerCAmelCase_ ): logger.info('''Q: {} - A: {}'''.format(lowerCAmelCase_ , lowerCAmelCase_ ) ) return answers def _a ( ): """simple docstring""" _snake_case : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=lowerCAmelCase_ , help=( '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the''' ''' model_name_or_path''' ) , ) parser.add_argument( '''--index_name''' , default=lowerCAmelCase_ , choices=['''exact''', '''compressed''', '''legacy'''] , type=lowerCAmelCase_ , help='''RAG model retriever type''' , ) parser.add_argument( '''--index_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Path to the retrieval index''' , ) parser.add_argument('''--n_docs''' , default=5 , type=lowerCAmelCase_ , help='''Number of retrieved docs''' ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=lowerCAmelCase_ , help=( '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates''' ''' precision@k.''' ) , ) parser.add_argument('''--k''' , default=1 , type=lowerCAmelCase_ , help='''k for the precision@k calculation''' ) parser.add_argument( '''--evaluation_set''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to a file containing evaluation samples''' , ) parser.add_argument( '''--gold_data_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to a tab-separated file with gold samples''' , ) parser.add_argument( '''--gold_data_mode''' , default='''qa''' , type=lowerCAmelCase_ , choices=['''qa''', '''ans'''] , help=( '''Format of the gold data file''' '''qa - a single line in the following format: question [tab] answer_list''' '''ans - a single line of the gold file contains the expected answer string''' ) , ) parser.add_argument( '''--predictions_path''' , type=lowerCAmelCase_ , default='''predictions.txt''' , help='''Name of the 
predictions file, to be stored in the checkpoints directory''' , ) parser.add_argument( '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , ) parser.add_argument( '''--eval_batch_size''' , default=8 , type=lowerCAmelCase_ , help='''Batch size per GPU/CPU for evaluation.''' , ) parser.add_argument( '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , ) parser.add_argument( '''--num_beams''' , default=4 , type=lowerCAmelCase_ , help='''Number of beams to be used when generating answers''' , ) parser.add_argument('''--min_length''' , default=1 , type=lowerCAmelCase_ , help='''Min length of the generated answers''' ) parser.add_argument('''--max_length''' , default=50 , type=lowerCAmelCase_ , help='''Max length of the generated answers''' ) parser.add_argument( '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , ) parser.add_argument( '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , ) _snake_case : Optional[Any] = parser.parse_args() _snake_case : List[str] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) return args def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = {} if args.model_type is None: _snake_case : int = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith('''rag''' ): _snake_case : Optional[int] = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration _snake_case : Optional[int] = args.n_docs if args.index_name is not None: _snake_case : str = args.index_name if args.index_path is not None: _snake_case : Any = args.index_path else: _snake_case : str = BartForConditionalGeneration _snake_case : Any = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info('''Evaluate the following checkpoints: %s''' , lowerCAmelCase_ ) _snake_case : Optional[int] = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k _snake_case : str = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) ) score_fn(lowerCAmelCase_ , args.predictions_path , args.gold_data_path ) continue logger.info('''***** Running evaluation for {} *****'''.format(lowerCAmelCase_ ) ) logger.info(''' Batch size = %d''' , args.eval_batch_size ) logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) ) if args.model_type.startswith('''rag''' ): _snake_case : List[str] = RagRetriever.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) _snake_case : List[Any] = model_class.from_pretrained(lowerCAmelCase_ , retriever=lowerCAmelCase_ , **lowerCAmelCase_ ) model.retriever.init_retrieval() else: _snake_case : Optional[Any] = model_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) model.to(args.device ) with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file: _snake_case : Tuple = [] for line in tqdm(lowerCAmelCase_ ): questions.append(line.strip() ) if len(lowerCAmelCase_ ) == 
args.eval_batch_size: _snake_case : List[Any] = evaluate_batch_fn(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) preds_file.write('''\n'''.join(lowerCAmelCase_ ) + '''\n''' ) preds_file.flush() _snake_case : Any = [] if len(lowerCAmelCase_ ) > 0: _snake_case : List[Any] = evaluate_batch_fn(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) preds_file.write('''\n'''.join(lowerCAmelCase_ ) ) preds_file.flush() score_fn(lowerCAmelCase_ , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": UpperCAmelCase : int = get_args() main(args)
47
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    """Yield the Fibonacci numbers 1, 2, 3, 5, 8, ..."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits
    (Project Euler 25)."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
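Quick checks for solution() above: the first Fibonacci term with three digits is F(12) = 144, and the published Project Euler 25 answer for 1000 digits is index 4782:

assert solution(3) == 12       # F(12) = 144 is the first 3-digit term
assert solution(1000) == 4782  # Project Euler 25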
47
1
'''simple docstring''' from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class lowerCamelCase (yaml.SafeLoader ): def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" _snake_case : int = [self.constructed_objects[key_node] for key_node, _ in node.value] _snake_case : str = [tuple(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else key for key in keys] _snake_case : str = Counter(lowercase__ ) _snake_case : List[str] = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__=False ) -> Tuple: """simple docstring""" _snake_case : Tuple = super().construct_mapping(lowercase__ , deep=lowercase__ ) self._check_no_duplicates_on_constructed_node(lowercase__ ) return mapping def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : int = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: _snake_case : int = full_content[1:].index('''---''' ) + 1 _snake_case : Optional[Any] = '''\n'''.join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(lowerCAmelCase_ ) class lowerCamelCase (a__ ): # class attributes _lowercase : Tuple = {"""train_eval_index"""} # train-eval-index in the YAML metadata @classmethod def UpperCAmelCase_ ( cls , lowercase__ ) -> "DatasetMetadata": """simple docstring""" with open(lowercase__ , encoding='''utf-8''' ) as readme_file: _snake_case , _snake_case : Tuple = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(lowercase__ ) else: return cls() def UpperCAmelCase_ ( self , lowercase__ ) -> Dict: """simple docstring""" if path.exists(): with open(lowercase__ , encoding='''utf-8''' ) as readme_file: _snake_case : Any = readme_file.read() else: _snake_case : int = None _snake_case : Optional[int] = self._to_readme(lowercase__ ) with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as readme_file: readme_file.write(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ = None ) -> str: """simple docstring""" if readme_content is not None: _snake_case , _snake_case : Any = _split_yaml_from_readme(lowercase__ ) _snake_case : Tuple = '''---\n''' + self.to_yaml_string() + '''---\n''' + content else: _snake_case : int = '''---\n''' + self.to_yaml_string() + '''---\n''' return full_content @classmethod def UpperCAmelCase_ ( cls , lowercase__ ) -> "DatasetMetadata": """simple docstring""" _snake_case : Optional[int] = yaml.load(lowercase__ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields _snake_case : Optional[Any] = { (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**lowercase__ ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" return yaml.safe_dump( { (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=lowercase__ , allow_unicode=lowercase__ , encoding='''utf-8''' , ).decode('''utf-8''' ) UpperCAmelCase : Optional[Any] = { 'image-classification': [], 'translation': [], 'image-segmentation': [], 'fill-mask': [], 'automatic-speech-recognition': [], 'token-classification': [], 'sentence-similarity': [], 'audio-classification': [], 'question-answering': [], 'summarization': [], 'zero-shot-classification': [], 'table-to-text': [], 'feature-extraction': [], 'other': [], 'multiple-choice': [], 'text-classification': [], 'text-to-image': [], 'text2text-generation': [], 'zero-shot-image-classification': [], 'tabular-classification': [], 'tabular-regression': [], 'image-to-image': [], 'tabular-to-text': [], 'unconditional-image-generation': [], 'text-retrieval': [], 'text-to-speech': [], 'object-detection': [], 'audio-to-audio': [], 'text-generation': [], 'conversational': [], 'table-question-answering': [], 'visual-question-answering': [], 'image-to-text': [], 'reinforcement-learning': [], 'voice-activity-detection': [], 'time-series-forecasting': [], 'document-question-answering': [], } if __name__ == "__main__": from argparse import ArgumentParser UpperCAmelCase : Optional[Any] = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.') ap.add_argument('readme_filepath') UpperCAmelCase : Optional[int] = ap.parse_args() UpperCAmelCase : int = Path(args.readme_filepath) UpperCAmelCase : Optional[int] = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
47
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor UpperCAmelCase : str = logging.getLogger(__name__) UpperCAmelCase : Dict = 5_0 # max width of layer names UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' ) group.add_argument( '''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _a ( lowerCAmelCase_ ): """simple docstring""" if args.calibrator == "max": _snake_case : Optional[int] = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) _snake_case : Tuple = '''histogram''' elif args.calibrator == "mse": _snake_case : int = '''histogram''' else: raise ValueError(f'''Invalid calibrator {args.calibrator}''' ) _snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ ) _snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ): """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(f'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ ) if args.quant_disable: set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ ) if args.quant_disable_keyword: 
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ ) if args.quant_disable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ ) if args.quant_enable_layer_module: set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ ) if args.recalibrate_weights: recalibrate_weights(lowerCAmelCase_ ) if args.fuse_qkv: fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ ) if args.clip_gelu: clip_gelu(lowerCAmelCase_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f'''{name:80}: {module}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): for mod in [qq, qk, qv]: if not hasattr(lowerCAmelCase_ , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return _snake_case : Tuple = qq._amax.detach().item() _snake_case : Tuple = qk._amax.detach().item() _snake_case : List[Any] = qv._amax.detach().item() _snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) qq._amax.fill_(lowerCAmelCase_ ) qk._amax.fill_(lowerCAmelCase_ ) qv._amax.fill_(lowerCAmelCase_ ) logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(f'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): _snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ ) _snake_case : List[str] = mod._input_quantizer._amax.data.detach().item() logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: _snake_case : Dict = mod.weight.shape[0] _snake_case : Optional[int] = mod._weight_quantizer._amax.detach() _snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) _snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) _snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set _snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach() logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) _snake_case : Tuple = amax def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ): """simple docstring""" if ignore is None: _snake_case : Dict = [] elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[int] = [ignore] _snake_case : str = 0 for name, mod in model.named_modules(): if not hasattr(lowerCAmelCase_ , '''weight''' ): continue _snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) ) for name, mod in model.named_modules(): _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ ) _snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ ) if not hasattr(lowerCAmelCase_ , '''weight''' ): continue if type(lowerCAmelCase_ ) in ignore: continue if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]: continue _snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}''' _snake_case : Any = f'''Wgt:{weight_q.extra_repr()}''' _snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}''' if len(lowerCAmelCase_ ) <= line_width: logger.info(lowerCAmelCase_ ) else: logger.info(f'''{name:{name_width}} {act_str}''' ) logger.info(f'''{" ":{name_width}} {wgt_str}''' ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : str = 0 for name, mod in model.named_modules(): if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ): print(f'''{name:80} {mod}''' ) count += 1 print(f'''{count} TensorQuantizers found in model''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if quantizer_mod is not None: assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) else: logger.warning(f'''{name} has no {quantizer}''' ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' if which in ["input", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) if which in ["weight", "both"]: set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Any = f'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += f''' {k}={v}''' setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info(lowerCAmelCase_ )
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return 0 elif n == 2: return 1 else: _snake_case : Union[str, Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : int = 2 while digits < n: index += 1 _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) ) return index def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" return fibonacci_digits_index(lowerCAmelCase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
'''simple docstring''' from __future__ import annotations def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): """simple docstring""" if start is None: _snake_case : Optional[Any] = 0 if end is None: _snake_case : Any = len(lowerCAmelCase_ ) - 1 if start >= end: return _snake_case : Optional[Any] = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case : int = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
47
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase : int = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class lowerCamelCase (a__ ): _lowercase : int = """lxmert""" _lowercase : Optional[int] = {} def __init__( self , lowercase__=30_522 , lowercase__=768 , lowercase__=12 , lowercase__=9_500 , lowercase__=1_600 , lowercase__=400 , lowercase__=3_072 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=512 , lowercase__=2 , lowercase__=0.02 , lowercase__=1E-1_2 , lowercase__=9 , lowercase__=5 , lowercase__=5 , lowercase__=2_048 , lowercase__=4 , lowercase__=6.67 , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , lowercase__=True , **lowercase__ , ) -> Dict: """simple docstring""" _snake_case : Optional[Any] = vocab_size _snake_case : Dict = hidden_size _snake_case : List[Any] = num_attention_heads _snake_case : Union[str, Any] = hidden_act _snake_case : int = intermediate_size _snake_case : Union[str, Any] = hidden_dropout_prob _snake_case : Dict = attention_probs_dropout_prob _snake_case : Optional[Any] = max_position_embeddings _snake_case : int = type_vocab_size _snake_case : int = initializer_range _snake_case : Any = layer_norm_eps _snake_case : Optional[Any] = num_qa_labels _snake_case : str = num_object_labels _snake_case : List[Any] = num_attr_labels _snake_case : Optional[Any] = l_layers _snake_case : Optional[int] = x_layers _snake_case : Dict = r_layers _snake_case : str = visual_feat_dim _snake_case : Union[str, Any] = visual_pos_dim _snake_case : Any = visual_loss_normalizer _snake_case : int = task_matched _snake_case : Optional[int] = task_mask_lm _snake_case : Tuple = task_obj_predict _snake_case : Tuple = task_qa _snake_case : List[str] = visual_obj_loss _snake_case : Optional[int] = visual_attr_loss _snake_case : str = visual_feat_loss _snake_case : Union[str, Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**lowercase__ )
47
'''simple docstring''' import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Tuple = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''' ) _snake_case : Any = AutoTokenizer.from_pretrained('''google/mt5-small''' ) _snake_case : List[str] = tokenizer('''Hello there''' , return_tensors='''np''' ).input_ids _snake_case : Dict = tokenizer('''Hi I am''' , return_tensors='''np''' ).input_ids _snake_case : Any = shift_tokens_right(lowercase__ , model.config.pad_token_id , model.config.decoder_start_token_id ) _snake_case : Any = model(lowercase__ , decoder_input_ids=lowercase__ ).logits _snake_case : Tuple = optax.softmax_cross_entropy(lowercase__ , onehot(lowercase__ , logits.shape[-1] ) ).mean() _snake_case : Tuple = -(labels.shape[-1] * loss.item()) _snake_case : Union[str, Any] = -84.9_127 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
47
1
'''simple docstring''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = len(lowerCAmelCase_ ) _snake_case : List[Any] = len(lowerCAmelCase_ ) _snake_case : Any = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _snake_case : List[Any] = True for i in range(lowerCAmelCase_ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _snake_case : List[str] = True if a[i].islower(): _snake_case : List[Any] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
47
'''simple docstring''' import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" _snake_case : Any = torch.nn.Linear(10 , 10 ) _snake_case : Optional[int] = torch.optim.SGD(model.parameters() , 0.1 ) _snake_case : List[str] = Accelerator() _snake_case : Optional[Any] = accelerator.prepare(lowercase__ ) try: pickle.loads(pickle.dumps(lowercase__ ) ) except Exception as e: self.fail(F'''Accelerated optimizer pickling failed with {e}''' ) AcceleratorState._reset_state()
47
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : int = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
'''simple docstring''' UpperCAmelCase : Union[str, Any] = tuple[float, float, float] UpperCAmelCase : int = tuple[float, float, float] def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = end_pointa[0] - end_pointa[0] _snake_case : Tuple = end_pointa[1] - end_pointa[1] _snake_case : Any = end_pointa[2] - end_pointa[2] return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Dict = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : List[str] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : Optional[int] = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" return tuple(round(lowerCAmelCase_ , lowerCAmelCase_ ) for x in vector ) == (0, 0, 0) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 10 ): """simple docstring""" _snake_case : str = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Tuple = create_vector(lowerCAmelCase_ , lowerCAmelCase_ ) return is_zero_vector(get_ad_vectors_cross(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ )
47
1
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int UpperCAmelCase : Tuple = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class lowerCamelCase (datasets.BuilderConfig ): _lowercase : Optional[datasets.Features] = None def _a ( lowerCAmelCase_ , lowerCAmelCase_ , ): """simple docstring""" import pyspark def generate_fn(): _snake_case : Tuple = df.select('''*''' , pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: _snake_case : List[Any] = df_with_partition_id.select('''*''' ).where(f'''part_id = {partition_id}''' ).drop('''part_id''' ) _snake_case : Optional[Any] = partition_df.collect() _snake_case : List[Any] = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class lowerCamelCase (_BaseExamplesIterable ): def __init__( self , lowercase__ , lowercase__=None , ) -> Union[str, Any]: """simple docstring""" _snake_case : int = df _snake_case : Any = partition_order or range(self.df.rdd.getNumPartitions() ) _snake_case : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ) -> Optional[int]: """simple docstring""" yield from self.generate_examples_fn() def UpperCAmelCase_ ( self , lowercase__ ) -> "SparkExamplesIterable": """simple docstring""" _snake_case : Union[str, Any] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowercase__ ) return SparkExamplesIterable(self.df , partition_order=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> "SparkExamplesIterable": """simple docstring""" _snake_case : Tuple = self.split_shard_indices_by_worker(lowercase__ , lowercase__ ) return SparkExamplesIterable(self.df , partition_order=lowercase__ ) @property def UpperCAmelCase_ ( self ) -> int: """simple docstring""" return len(self.partition_order ) class lowerCamelCase (datasets.DatasetBuilder ): _lowercase : List[Any] = SparkConfig def __init__( self , lowercase__ , lowercase__ = None , lowercase__ = None , **lowercase__ , ) -> Optional[int]: """simple docstring""" import pyspark _snake_case : Union[str, Any] = pyspark.sql.SparkSession.builder.getOrCreate() _snake_case : List[str] = df _snake_case : List[Any] = working_dir super().__init__( cache_dir=lowercase__ , config_name=str(self.df.semanticHash() ) , **lowercase__ , ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" def create_cache_and_write_probe(lowercase__ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowercase__ ) _snake_case : List[Any] = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. 
open(lowercase__ , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _snake_case : int = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def UpperCAmelCase_ ( self ) -> str: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def UpperCAmelCase_ ( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" import pyspark def get_arrow_batch_size(lowercase__ ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) _snake_case : Any = self.df.count() _snake_case : List[Any] = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _snake_case : List[str] = ( self.df.limit(lowercase__ ) .repartition(1 ) .mapInArrow(lowercase__ , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _snake_case : Optional[int] = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _snake_case : Union[str, Any] = min(lowercase__ , int(approx_total_size / max_shard_size ) ) _snake_case : Optional[int] = self.df.repartition(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: """simple docstring""" import pyspark _snake_case : int = ParquetWriter if file_format == '''parquet''' else ArrowWriter _snake_case : Optional[Any] = os.path.join(self._working_dir , os.path.basename(lowercase__ ) ) if self._working_dir else fpath _snake_case : int = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _snake_case : Tuple = self.config.features _snake_case : Dict = self._writer_batch_size _snake_case : Dict = self._fs.storage_options def write_arrow(lowercase__ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _snake_case : int = pyspark.TaskContext().taskAttemptId() _snake_case : List[str] = next(lowercase__ , lowercase__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) _snake_case : List[str] = 0 _snake_case : str = writer_class( features=lowercase__ , path=working_fpath.replace('''SSSSS''' , F'''{shard_id:05d}''' ).replace('''TTTTT''' , F'''{task_id:05d}''' ) , writer_batch_size=lowercase__ , storage_options=lowercase__ , embed_local_files=lowercase__ , ) _snake_case : Optional[int] = pa.Table.from_batches([first_batch] ) writer.write_table(lowercase__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _snake_case , _snake_case : Any = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 _snake_case : Tuple = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , F'''{shard_id:05d}''' ).replace('''TTTTT''' , F'''{task_id:05d}''' ) , writer_batch_size=lowercase__ , storage_options=lowercase__ , embed_local_files=lowercase__ , ) _snake_case : List[Any] = pa.Table.from_batches([batch] ) writer.write_table(lowercase__ ) if writer._num_bytes > 0: _snake_case , _snake_case : List[str] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(lowercase__ ) ): _snake_case : Any = os.path.join(os.path.dirname(lowercase__ ) , os.path.basename(lowercase__ ) ) shutil.move(lowercase__ , lowercase__ ) _snake_case : Dict = ( self.df.mapInArrow(lowercase__ , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = "arrow" , lowercase__ = None , lowercase__ = None , **lowercase__ , ) -> Optional[int]: """simple docstring""" self._validate_cache_dir() _snake_case : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(lowercase__ ) _snake_case : Any = not is_remote_filesystem(self._fs ) _snake_case : str = os.path.join if is_local else posixpath.join _snake_case : int = '''-TTTTT-SSSSS-of-NNNNN''' _snake_case : Dict = F'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _snake_case : Optional[int] = path_join(self._output_dir , lowercase__ ) _snake_case : int = 0 _snake_case : Tuple = 0 _snake_case : List[str] = 0 _snake_case : Optional[Any] = [] _snake_case : Optional[int] = [] for task_id, content in self._prepare_split_single(lowercase__ , lowercase__ , lowercase__ ): ( ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ( _snake_case ) , ) : Union[str, Any] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(lowercase__ ) _snake_case : Dict = total_num_examples _snake_case : List[Any] = total_num_bytes # should rename everything at the end 
logger.debug(F'''Renaming {total_shards} shards.''' ) if total_shards > 1: _snake_case : Dict = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _snake_case : List[Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( lowercase__ , lowercase__ , lowercase__ , ): rename( lowercase__ , fpath.replace('''SSSSS''' , F'''{shard_id:05d}''' ).replace('''TTTTT''' , F'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , F'''{global_shard_id:05d}''' ).replace('''NNNNN''' , F'''{total_shards:05d}''' ) , ) _snake_case : int = [] _snake_case : Dict = 0 for i in range(len(lowercase__ ) ): _snake_case , _snake_case : Union[str, Any] = task_id_and_num_shards[i] for shard_id in range(lowercase__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(lowercase__ , len(lowercase__ ) ).map(lambda lowercase__ : _rename_shard(*lowercase__ ) ).collect() else: # don't use any pattern _snake_case : Any = 0 _snake_case : int = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , F'''{shard_id:05d}''' ).replace('''TTTTT''' , F'''{task_id:05d}''' ) , fpath.replace(lowercase__ , '''''' ) , ) def UpperCAmelCase_ ( self , lowercase__ , ) -> SparkExamplesIterable: """simple docstring""" return SparkExamplesIterable(self.df )
47
'''simple docstring''' import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase : List[str] = logging.getLogger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(lowerCAmelCase_ ): if os.path.exists(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''config.json''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''config.json''' ) ) if os.path.exists(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(lowerCAmelCase_ , '''pytorch_model.bin''' ) ) else: os.makedirs(lowerCAmelCase_ ) model.save_pretrained(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Optional[Any] = 2 if unlogit: _snake_case : Any = torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = p * torch.log(lowerCAmelCase_ ) _snake_case : Optional[Any] = 0 return -plogp.sum(dim=-1 ) def _a ( lowerCAmelCase_ ): """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(lowerCAmelCase_ ) ) ) ) for row in range(len(lowerCAmelCase_ ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=False ): """simple docstring""" _snake_case , _snake_case : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads _snake_case : Tuple = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) _snake_case : Union[str, Any] = torch.zeros(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) if head_mask is None: _snake_case : int = torch.ones(lowerCAmelCase_ , lowerCAmelCase_ ).to(args.device ) head_mask.requires_grad_(requires_grad=lowerCAmelCase_ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case : Dict = None _snake_case : Dict = 0.0 _snake_case : Optional[int] = 0.0 for step, inputs in enumerate(tqdm(lowerCAmelCase_ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _snake_case : List[Any] = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case : List[Any] = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(lowerCAmelCase_ ): _snake_case : Union[str, Any] = entropy(attn.detach() , lowerCAmelCase_ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += 
head_mask.grad.abs().detach() tot_tokens += torch.ones_like(lowerCAmelCase_ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case : Any = 2 _snake_case : List[str] = torch.pow(torch.pow(lowerCAmelCase_ , lowerCAmelCase_ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: _snake_case : Optional[int] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(lowerCAmelCase_ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(lowerCAmelCase_ ) logger.info('''Head ranked by importance scores''' ) _snake_case : str = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case : List[Any] = torch.arange( head_importance.numel() , device=args.device ) _snake_case : List[Any] = head_ranks.view_as(lowerCAmelCase_ ) print_ad_tensor(lowerCAmelCase_ ) return attn_entropy, head_importance, total_loss def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case , _snake_case , _snake_case : str = compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ ) _snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , lowerCAmelCase_ , original_score * args.masking_threshold ) _snake_case : int = torch.ones_like(lowerCAmelCase_ ) _snake_case : Optional[Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case : int = original_score while current_score >= original_score * args.masking_threshold: _snake_case : int = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case : Dict = float('''Inf''' ) _snake_case : Optional[Any] = head_importance.view(-1 ).sort()[1] if len(lowerCAmelCase_ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads _snake_case : Union[str, Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) _snake_case : Tuple = new_head_mask.view(-1 ) _snake_case : List[str] = 0.0 _snake_case : str = new_head_mask.view_as(lowerCAmelCase_ ) _snake_case : Dict = new_head_mask.clone().detach() print_ad_tensor(lowerCAmelCase_ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case : Any = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : int = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , lowerCAmelCase_ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(lowerCAmelCase_ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = datetime.now() _snake_case , _snake_case , _snake_case : Union[str, Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , 
compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ ) _snake_case : Tuple = 1 / loss _snake_case : Dict = datetime.now() - before_time _snake_case : List[Any] = sum(p.numel() for p in model.parameters() ) _snake_case : int = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(lowerCAmelCase_ ) ) } for k, v in heads_to_prune.items(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Union[str, Any] = [ v, ] assert sum(len(lowerCAmelCase_ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(lowerCAmelCase_ ) _snake_case : List[str] = sum(p.numel() for p in model.parameters() ) _snake_case : int = datetime.now() _snake_case , _snake_case , _snake_case : Optional[Any] = compute_heads_importance( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , compute_entropy=lowerCAmelCase_ , compute_importance=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , actually_pruned=lowerCAmelCase_ , ) _snake_case : Optional[int] = 1 / loss _snake_case : Dict = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , lowerCAmelCase_ , lowerCAmelCase_ , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , lowerCAmelCase_ , lowerCAmelCase_ ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(lowerCAmelCase_ , args.output_dir ) def _a ( ): """simple docstring""" _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=lowerCAmelCase_ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=lowerCAmelCase_ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=lowerCAmelCase_ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=lowerCAmelCase_ , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=lowerCAmelCase_ , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=lowerCAmelCase_ , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=lowerCAmelCase_ , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 ) parser.add_argument('''--local_rank''' , type=lowerCAmelCase_ , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=lowerCAmelCase_ , default='''''' , help='''Can be used for distant debugging.''' ) _snake_case : Optional[Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case : str = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) _snake_case : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case : List[str] = torch.device('''cuda''' , args.local_rank ) _snake_case : int = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _snake_case : Optional[int] = nn.parallel.DistributedDataParallel( lowerCAmelCase_ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCAmelCase_ ) elif args.n_gpu > 1: _snake_case : List[Any] = nn.DataParallel(lowerCAmelCase_ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=lowerCAmelCase_ ) torch.save(lowerCAmelCase_ , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , lowerCAmelCase_ ) # Prepare dataset _snake_case : Dict = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case : int = (torch.from_numpy(lowerCAmelCase_ ),) _snake_case : Tuple = TensorDataset(*lowerCAmelCase_ ) _snake_case : List[str] = RandomSampler(lowerCAmelCase_ ) _snake_case : Dict = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case : Optional[int] = mask_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) prune_heads(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": main()
47
1
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller UpperCAmelCase : List[str] = 3 def _a ( lowerCAmelCase_ ): """simple docstring""" print('''Generating primitive root of p''' ) while True: _snake_case : Tuple = random.randrange(3 , lowerCAmelCase_ ) if pow(lowerCAmelCase_ , 2 , lowerCAmelCase_ ) == 1: continue if pow(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) == 1: continue return g def _a ( lowerCAmelCase_ ): """simple docstring""" print('''Generating prime p...''' ) _snake_case : str = rabin_miller.generate_large_prime(lowerCAmelCase_ ) # select large prime number. _snake_case : int = primitive_root(lowerCAmelCase_ ) # one primitive root on modulo p. _snake_case : Tuple = random.randrange(3 , lowerCAmelCase_ ) # private_key -> have to be greater than 2 for safety. _snake_case : Any = cryptomath.find_mod_inverse(pow(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ ) _snake_case : Optional[Any] = (key_size, e_a, e_a, p) _snake_case : Optional[int] = (key_size, d) return public_key, private_key def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if os.path.exists(f'''{name}_pubkey.txt''' ) or os.path.exists(f'''{name}_privkey.txt''' ): print('''\nWARNING:''' ) print( f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n''' '''Use a different name or delete these files and re-run this program.''' ) sys.exit() _snake_case , _snake_case : Tuple = generate_key(lowerCAmelCase_ ) print(f'''\nWriting public key to file {name}_pubkey.txt...''' ) with open(f'''{name}_pubkey.txt''' , '''w''' ) as fo: fo.write(f'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''' ) print(f'''Writing private key to file {name}_privkey.txt...''' ) with open(f'''{name}_privkey.txt''' , '''w''' ) as fo: fo.write(f'''{private_key[0]},{private_key[1]}''' ) def _a ( ): """simple docstring""" print('''Making key files...''' ) make_key_files('''elgamal''' , 2_048 ) print('''Key files generation successful''' ) if __name__ == "__main__": main()
47
'''simple docstring''' def _a ( lowerCAmelCase_ ): """simple docstring""" if n == 1 or not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return 0 elif n == 2: return 1 else: _snake_case : Union[str, Any] = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[int] = 0 _snake_case : int = 2 while digits < n: index += 1 _snake_case : Tuple = len(str(fibonacci(lowerCAmelCase_ ) ) ) return index def _a ( lowerCAmelCase_ = 1_000 ): """simple docstring""" return fibonacci_digits_index(lowerCAmelCase_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
47
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) UpperCAmelCase : Optional[Any] = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
"""Least-recently-used (LRU) cache backed by a dict and a doubly linked list."""
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Node of the doubly linked list used by the LRU cache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Doubly linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert `node` just before the rear sentinel (most recently used)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have a non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(
        self, node: DoubleLinkedListNode[T, U]
    ) -> DoubleLinkedListNode[T, U] | None:
        """Detach `node` from the list; return it, or None if it was not linked."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache: O(1) get/put via a dict of nodes plus recency-ordered list."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for `key`, marking it most recently used, else None."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Insert or update `key`, evicting the least recently used entry if full."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator factory: memoize a single-argument function with an LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
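# Illustrative usage of the cache above (not part of the original module):
# `LRUCache.decorator` memoizes a function of one positional argument and
# attaches a `cache_info()` accessor to the wrapped callable.

@LRUCache.decorator(100)
def fib(num: int) -> int:
    return 1 if num in (1, 2) else fib(num - 1) + fib(num - 2)

print(fib(30))           # each distinct argument is computed at most once
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)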
47
1
"""Project Euler 39: the perimeter p <= 1000 with the most right-triangle solutions."""
from __future__ import annotations

import typing
from collections import Counter


def pythagorean_triple(max_perimeter: int) -> typing.Counter[int]:
    """Count, for each perimeter up to max_perimeter, the integer right triangles with that perimeter."""
    triplets: typing.Counter[int] = Counter()
    for base in range(1, max_perimeter + 1):
        for perpendicular in range(base, max_perimeter + 1):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse):
                perimeter = int(base + perpendicular + hypotenuse)
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets


def solution(limit: int = 1_000) -> int:
    """Return the perimeter with the maximum number of solutions."""
    triplets = pythagorean_triple(limit)
    return triplets.most_common(1)[0][0]


if __name__ == "__main__":
    print(f"Perimeter {solution()} has maximum solutions")
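# Quick sanity check (illustrative, not part of the original file): Project
# Euler 39 notes that a perimeter of 120 admits exactly three integer right
# triangles -- (20, 48, 52), (24, 45, 51) and (30, 40, 50).
assert pythagorean_triple(120)[120] == 3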
47
'''simple docstring''' import os import numpy import onnx def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = a.name _snake_case : List[Any] = b.name _snake_case : Tuple = '''''' _snake_case : Tuple = '''''' _snake_case : Optional[Any] = a == b _snake_case : List[Any] = name_a _snake_case : str = name_b return res def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(lowerCAmelCase_ , lowerCAmelCase_ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ ) _graph_replace_input_with(node_proto.attribute[1].g , lowerCAmelCase_ , lowerCAmelCase_ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" for n in graph_proto.node: _node_replace_input_with(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Optional[Any] = list(model.graph.initializer ) _snake_case : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i _snake_case : List[Any] = inits[i].name _snake_case : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , lowerCAmelCase_ , lowerCAmelCase_ ) def _a ( lowerCAmelCase_ ): """simple docstring""" _snake_case : Tuple = os.path.dirname(lowerCAmelCase_ ) _snake_case : str = os.path.basename(lowerCAmelCase_ ) _snake_case : Tuple = onnx.load(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) ) _snake_case : Union[str, Any] = list(model.graph.initializer ) _snake_case : Union[str, Any] = set() _snake_case : Any = {} _snake_case : str = [] _snake_case : Union[str, Any] = 0 for i in range(len(lowerCAmelCase_ ) ): if i in dup_set: continue for j in range(i + 1 , len(lowerCAmelCase_ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(lowerCAmelCase_ ) dup_set.add(lowerCAmelCase_ ) _snake_case : List[Any] = inits[j].data_type _snake_case : Dict = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('''unexpected data type: ''' , lowerCAmelCase_ ) total_reduced_size += mem_size _snake_case : Union[str, Any] = inits[i].name _snake_case : Any = inits[j].name if name_i in dup_map: dup_map[name_i].append(lowerCAmelCase_ ) else: _snake_case : Union[str, Any] = [name_j] ind_to_replace.append((j, i) ) print('''total reduced size: ''' , total_reduced_size / 1_024 / 1_024 / 1_024 , '''GB''' ) _snake_case : List[str] = sorted(lowerCAmelCase_ ) _remove_dup_initializers_from_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : List[str] = '''optimized_''' + model_file_name _snake_case : List[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) onnx.save(lowerCAmelCase_ , lowerCAmelCase_ ) return new_model
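# Illustrative driver for the deduplication pass above (the path is
# hypothetical, and `remove_dup_initializers` is the upstream name of the
# final function, whose placeholder name here is `_a`):
#
#     new_model = remove_dup_initializers("exported/model.onnx")
#     # writes optimized_model.onnx next to the input and returns the model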
47
1
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowerCamelCase (a__ ): _lowercase : Dict = """facebook/bart-large-mnli""" _lowercase : Union[str, Any] = ( """This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which """ """should be the text to classify, and `labels`, which should be the list of labels to use for classification. """ """It returns the most likely label in the list of provided `labels` for the input text.""" ) _lowercase : Optional[Any] = """text_classifier""" _lowercase : Dict = AutoTokenizer _lowercase : Dict = AutoModelForSequenceClassification _lowercase : str = ["""text""", ["""text"""]] _lowercase : Optional[int] = ["""text"""] def UpperCAmelCase_ ( self ) -> int: """simple docstring""" super().setup() _snake_case : Dict = self.model.config _snake_case : Union[str, Any] = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): _snake_case : List[Any] = int(lowercase__ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Tuple = labels return self.pre_processor( [text] * len(lowercase__ ) , [F'''This example is {label}''' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" _snake_case : int = outputs.logits _snake_case : Optional[int] = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
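# Illustrative usage sketch (assumes the upstream transformers names -- this
# class corresponds to `TextClassificationTool` -- and the standard
# PipelineTool calling convention; the placeholder names above differ):
#
#     tool = TextClassificationTool()
#     tool.setup()
#     tool("This movie was really enjoyable.", labels=["positive", "negative"])
#     # -> "positive"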
47
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : int = { 'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ 'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST', 'PegasusXForConditionalGeneration', 'PegasusXModel', 'PegasusXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pegasus_x import ( PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST, PegasusXForConditionalGeneration, PegasusXModel, PegasusXPreTrainedModel, ) else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
47
1
'''simple docstring''' # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers UpperCAmelCase : List[str] = float('nan') class lowerCamelCase : def __init__( self , lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Union[str, Any] = sys.stdout _snake_case : List[Any] = open(lowercase__ , '''a''' ) def __getattr__( self , lowercase__ ) -> Optional[int]: """simple docstring""" return getattr(self.stdout , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Dict: """simple docstring""" self.stdout.write(lowercase__ ) # strip tqdm codes self.file.write(re.sub(r'''^.*\r''' , '''''' , lowercase__ , 0 , re.M ) ) def _a ( lowerCAmelCase_=80 , lowerCAmelCase_=False ): """simple docstring""" _snake_case : Any = [] # deal with critical env vars _snake_case : List[str] = ['''CUDA_VISIBLE_DEVICES'''] for key in env_keys: _snake_case : List[Any] = os.environ.get(lowerCAmelCase_ , lowerCAmelCase_ ) if val is not None: cmd.append(f'''{key}={val}''' ) # python executable (not always needed if the script is executable) _snake_case : Any = sys.executable if full_python_path else sys.executable.split('''/''' )[-1] cmd.append(lowerCAmelCase_ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes _snake_case : Union[str, Any] = [] _snake_case : str = '''''' while len(lowerCAmelCase_ ) > 0: current_line += f'''{cmd.pop(0 )} ''' if len(lowerCAmelCase_ ) == 0 or len(lowerCAmelCase_ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(lowerCAmelCase_ ) _snake_case : Optional[Any] = '''''' return "\\\n".join(lowerCAmelCase_ ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : str = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd ) # remove --output_dir if any and set our own _snake_case : Optional[Any] = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd ) args.base_cmd += f''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir _snake_case : List[str] = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , ) _snake_case : int = subprocess.run(lowerCAmelCase_ , capture_output=lowerCAmelCase_ , text=lowerCAmelCase_ 
) if verbose: print('''STDOUT''' , result.stdout ) print('''STDERR''' , result.stderr ) # save the streams _snake_case : Optional[Any] = variation.replace(''' ''' , '''-''' ) with open(Path(lowerCAmelCase_ ) / f'''log.{prefix}.stdout.txt''' , '''w''' ) as f: f.write(result.stdout ) with open(Path(lowerCAmelCase_ ) / f'''log.{prefix}.stderr.txt''' , '''w''' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('''failed''' ) return {target_metric_key: nan} with io.open(f'''{output_dir}/all_results.json''' , '''r''' , encoding='''utf-8''' ) as f: _snake_case : Union[str, Any] = json.load(lowerCAmelCase_ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ): """simple docstring""" _snake_case : Optional[Any] = [] _snake_case : Optional[Any] = [] _snake_case : Dict = f'''{id}: {variation:<{longest_variation_len}}''' _snake_case : int = f'''{preamble}: ''' _snake_case : Union[str, Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(lowerCAmelCase_ ) , desc=lowerCAmelCase_ , leave=lowerCAmelCase_ ): _snake_case : Optional[Any] = process_run_single( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Union[str, Any] = single_run_metrics[target_metric_key] if not math.isnan(lowerCAmelCase_ ): metrics.append(lowerCAmelCase_ ) results.append(lowerCAmelCase_ ) outcome += "✓" else: outcome += "✘" _snake_case : Any = f'''\33[2K\r{outcome}''' if len(lowerCAmelCase_ ) > 0: _snake_case : List[str] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} _snake_case : str = round(mean_metrics[target_metric_key] , 2 ) _snake_case : Any = f'''{outcome} {mean_target}''' if len(lowerCAmelCase_ ) > 1: results_str += f''' {tuple(round(lowerCAmelCase_ , 2 ) for x in results )}''' print(lowerCAmelCase_ ) _snake_case : List[Any] = variation return mean_metrics else: print(lowerCAmelCase_ ) return {variation_key: variation, target_metric_key: nan} def _a ( ): """simple docstring""" _snake_case : Any = torch.cuda.get_device_properties(torch.device('''cuda''' ) ) return f''' Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Union[str, Any] = pd.DataFrame(lowerCAmelCase_ ) _snake_case : Tuple = '''variation''' _snake_case : Any = '''diff_%''' _snake_case : Any = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan _snake_case : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(lowerCAmelCase_ ): # as a fallback, use the minimal value as the sentinel _snake_case : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(lowerCAmelCase_ ): _snake_case : str = df.apply( lambda lowerCAmelCase_ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not 
math.isnan(r[target_metric_key] ) else 0 , axis='''columns''' , ) # re-order columns _snake_case : Optional[int] = [variation_key, target_metric_key, diff_key, *report_metric_keys] _snake_case : List[str] = df.reindex(lowerCAmelCase_ , axis='''columns''' ) # reorder cols # capitalize _snake_case : List[str] = df.rename(str.capitalize , axis='''columns''' ) # make the cols as narrow as possible _snake_case : str = df.rename(lambda lowerCAmelCase_ : c.replace('''_''' , '''<br>''' ) , axis='''columns''' ) _snake_case : Any = df.rename(lambda lowerCAmelCase_ : c.replace('''_''' , '''\n''' ) , axis='''columns''' ) _snake_case : List[str] = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum'''] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=lowerCAmelCase_ , floatfmt='''.2f''' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=lowerCAmelCase_ , floatfmt='''.2f''' )] print('''\n\n'''.join(lowerCAmelCase_ ) ) def _a ( ): """simple docstring""" _snake_case : Tuple = argparse.ArgumentParser() parser.add_argument( '''--base-cmd''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Base cmd''' , ) parser.add_argument( '''--variations''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , nargs='''+''' , required=lowerCAmelCase_ , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , ) parser.add_argument( '''--base-variation''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , ) parser.add_argument( '''--target-metric-key''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , ) parser.add_argument( '''--report-metric-keys''' , default='''''' , type=lowerCAmelCase_ , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples''' , ) parser.add_argument( '''--repeat-times''' , default=1 , type=lowerCAmelCase_ , help='''How many times to re-run each variation - an average will be reported''' , ) parser.add_argument( '''--output_dir''' , default='''output_benchmark''' , type=lowerCAmelCase_ , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , ) parser.add_argument( '''--verbose''' , default=lowerCAmelCase_ , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , ) _snake_case : Any = parser.parse_args() _snake_case : Dict = args.output_dir Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) _snake_case : Any = get_base_command(lowerCAmelCase_ , lowerCAmelCase_ ) # split each dimension into its --foo variations _snake_case : Optional[int] = [list(map(str.strip , re.split(R'''\|''' , lowerCAmelCase_ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty _snake_case : int = list(map(str.strip , map(''' '''.join , itertools.product(*lowerCAmelCase_ ) ) ) ) _snake_case : Dict = max(len(lowerCAmelCase_ ) for x in variations ) # split wanted keys _snake_case : str = args.report_metric_keys.split() # capture prints into a log file for convenience _snake_case : Any = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt''' print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(f'''and this script\'s output is also piped into {report_fn}''' ) _snake_case : List[Any] = Tee(lowerCAmelCase_ ) print(f'''\n*** Running {len(lowerCAmelCase_ )} benchmarks:''' ) print(f'''Base command: {" ".join(lowerCAmelCase_ )}''' ) _snake_case : List[Any] = '''variation''' _snake_case : Tuple = [] for id, variation in enumerate(tqdm(lowerCAmelCase_ , desc='''Total completion: ''' , leave=lowerCAmelCase_ ) ): _snake_case : Optional[int] = base_cmd + variation.split() results.append( process_run( id + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , args.target_metric_key , lowerCAmelCase_ , args.repeat_times , lowerCAmelCase_ , args.verbose , ) ) process_results(lowerCAmelCase_ , args.target_metric_key , lowerCAmelCase_ , args.base_variation , lowerCAmelCase_ ) if __name__ == "__main__": main()
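# Minimal sketch of the variation expansion performed in main() above: each
# `--variations` entry is split on '|' into one dimension, and the cartesian
# product of the dimensions gives the run matrix.
import itertools

dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
runs = [" ".join(parts).strip() for parts in itertools.product(*dims)]
# -> ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#     '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']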
47
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCAmelCase : Dict = logging.get_logger(__name__) class lowerCamelCase (a__ ): _lowercase : int = ["""pixel_values"""] def __init__( self , lowercase__ = True , lowercase__ = 32 , lowercase__=PILImageResampling.BILINEAR , lowercase__ = True , **lowercase__ , ) -> None: """simple docstring""" _snake_case : Any = do_resize _snake_case : List[str] = do_rescale _snake_case : Any = size_divisor _snake_case : Optional[Any] = resample super().__init__(**lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray: """simple docstring""" _snake_case , _snake_case : Dict = get_image_size(lowercase__ ) # Rounds the height and width down to the closest multiple of size_divisor _snake_case : Optional[int] = height // size_divisor * size_divisor _snake_case : Dict = width // size_divisor * size_divisor _snake_case : str = resize(lowercase__ , (new_h, new_w) , resample=lowercase__ , data_format=lowercase__ , **lowercase__ ) return image def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ ) -> np.ndarray: """simple docstring""" return rescale(image=lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ) -> BatchFeature: """simple docstring""" _snake_case : Any = do_resize if do_resize is not None else self.do_resize _snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale _snake_case : List[str] = size_divisor if size_divisor is not None else self.size_divisor _snake_case : int = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) _snake_case : Tuple = make_list_of_images(lowercase__ ) if not valid_images(lowercase__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. _snake_case : Tuple = [to_numpy_array(lowercase__ ) for img in images] if do_resize: _snake_case : Optional[int] = [self.resize(lowercase__ , size_divisor=lowercase__ , resample=lowercase__ ) for image in images] if do_rescale: _snake_case : Union[str, Any] = [self.rescale(lowercase__ , scale=1 / 255 ) for image in images] _snake_case : Union[str, Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images] _snake_case : List[str] = {'''pixel_values''': images} return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
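# The resize step above floors each spatial dimension to the nearest multiple
# of `size_divisor` (32 by default); e.g. a 487x641 input becomes 480x640:
height, width, size_divisor = 487, 641, 32
new_h = height // size_divisor * size_divisor  # 480
new_w = width // size_divisor * size_divisor   # 640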
47
1
'''simple docstring''' import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py UpperCAmelCase : str = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n' UpperCAmelCase : str = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n' UpperCAmelCase : Optional[int] = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... 
[["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase (datasets.Metric ): def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ), '''references''': datasets.Sequence( datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] , reference_urls=[ '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__=4 , lowercase__=False ) -> Dict: """simple docstring""" _snake_case : List[Any] = compute_bleu( reference_corpus=lowercase__ , translation_corpus=lowercase__ , max_order=lowercase__ , smooth=lowercase__ ) ((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) : str = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class lowerCamelCase : _lowercase : Any = LEDConfig _lowercase : Any = {} _lowercase : Optional[Any] = """gelu""" def __init__( self , lowercase__ , lowercase__=13 , lowercase__=7 , lowercase__=True , lowercase__=False , lowercase__=99 , lowercase__=32 , lowercase__=2 , lowercase__=4 , lowercase__=37 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=20 , lowercase__=2 , lowercase__=1 , lowercase__=0 , lowercase__=4 , ) -> Any: """simple docstring""" _snake_case : Dict = parent _snake_case : Any = batch_size _snake_case : List[str] = seq_length _snake_case : Union[str, Any] = is_training _snake_case : Tuple = use_labels _snake_case : int = vocab_size _snake_case : str = hidden_size _snake_case : Optional[Any] = num_hidden_layers _snake_case : List[Any] = num_attention_heads _snake_case : Optional[int] = intermediate_size _snake_case : List[Any] = hidden_dropout_prob _snake_case : List[str] = attention_probs_dropout_prob _snake_case : Optional[int] = max_position_embeddings _snake_case : Any = eos_token_id _snake_case : List[Any] = pad_token_id _snake_case : Optional[int] = bos_token_id _snake_case : Any = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after _snake_case : Any = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests _snake_case : Tuple = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" _snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _snake_case : Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) _snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _snake_case : List[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) _snake_case : Dict = prepare_led_inputs_dict(lowercase__ , lowercase__ , lowercase__ ) _snake_case : Dict = tf.concat( [tf.zeros_like(lowercase__ )[:, :-1], tf.ones_like(lowercase__ )[:, -1:]] , axis=-1 , ) _snake_case : Dict = global_attention_mask return config, inputs_dict def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> int: """simple docstring""" _snake_case : int = TFLEDModel(config=lowercase__ ).get_decoder() _snake_case : Union[str, Any] = inputs_dict['''input_ids'''] _snake_case : List[str] = input_ids[:1, :] _snake_case : Tuple = inputs_dict['''attention_mask'''][:1, :] _snake_case : Dict = 1 # first forward pass _snake_case : Optional[int] = model(lowercase__ , attention_mask=lowercase__ , use_cache=lowercase__ ) _snake_case , _snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _snake_case : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) _snake_case : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _snake_case : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) _snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _snake_case : List[Any] = model(lowercase__ , attention_mask=lowercase__ )[0] _snake_case : Tuple = model(lowercase__ , attention_mask=lowercase__ , past_key_values=lowercase__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _snake_case : Tuple = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _snake_case : int = output_from_no_past[:, -3:, random_slice_idx] _snake_case : Optional[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase__ , lowercase__ , rtol=1E-3 ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ): """simple docstring""" if attention_mask is None: _snake_case : Union[str, Any] = tf.cast(tf.math.not_equal(lowerCAmelCase_ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _snake_case : str = tf.concat( [ 
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _snake_case : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class lowerCamelCase (a__ , a__ , unittest.TestCase ): _lowercase : Optional[int] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () _lowercase : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () _lowercase : Dict = ( { """conversational""": TFLEDForConditionalGeneration, """feature-extraction""": TFLEDModel, """summarization""": TFLEDForConditionalGeneration, """text2text-generation""": TFLEDForConditionalGeneration, """translation""": TFLEDForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : int = True _lowercase : List[Any] = False _lowercase : str = False _lowercase : Union[str, Any] = False def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" _snake_case : str = TFLEDModelTester(self ) _snake_case : Union[str, Any] = ConfigTester(self , config_class=lowercase__ ) def UpperCAmelCase_ ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case , _snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() _snake_case : Any = tf.zeros_like(inputs_dict['''attention_mask'''] ) _snake_case : Optional[Any] = 2 _snake_case : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , ) _snake_case : Dict = True _snake_case : str = self.model_tester.seq_length _snake_case : Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(lowercase__ ): _snake_case : Optional[int] = outputs.decoder_attentions self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(lowercase__ ): _snake_case : int = [t.numpy() for t in outputs.encoder_attentions] _snake_case : Tuple = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(lowercase__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: _snake_case : Union[str, Any] = True _snake_case : Dict = False _snake_case : Union[str, Any] = False _snake_case : List[Any] = model_class(lowercase__ ) _snake_case : Optional[Any] = 
model(self._prepare_for_class(lowercase__ , lowercase__ ) ) _snake_case : List[Any] = len(lowercase__ ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) if self.is_encoder_decoder: _snake_case : Union[str, Any] = model_class(lowercase__ ) _snake_case : List[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_decoder_attentions_output(lowercase__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] _snake_case : str = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : int = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) # Check attention is always last and order is fine _snake_case : int = True _snake_case : List[str] = True _snake_case : Tuple = model_class(lowercase__ ) _snake_case : Optional[Any] = model(self._prepare_for_class(lowercase__ , lowercase__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowercase__ ) ) self.assertEqual(model.config.output_hidden_states , lowercase__ ) check_encoder_attentions_output(lowercase__ ) @unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" pass def UpperCAmelCase_ ( self ) -> str: """simple docstring""" pass def _a ( lowerCAmelCase_ ): """simple docstring""" return tf.constant(lowerCAmelCase_ , dtype=tf.intaa ) UpperCAmelCase : Dict = 1E-4 @slow @require_tf class lowerCamelCase (unittest.TestCase ): def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : List[str] = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led # change to intended input here _snake_case : List[str] = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Tuple = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : int = model(**lowercase__ )[0] _snake_case : Dict = (1, 1_024, 768) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : List[Any] = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ) # change to intended input here _snake_case : Dict = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : Dict = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] ) _snake_case : List[str] = prepare_led_inputs_dict(model.config , lowercase__ , lowercase__ ) _snake_case : Tuple = model(**lowercase__ )[0] _snake_case : Any = (1, 1_024, model.config.vocab_size) self.assertEqual(output.shape , lowercase__ ) # change to expected output here _snake_case : Dict = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , lowercase__ , atol=1E-3 , rtol=1E-3 )
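# The `encoder_seq_length` arithmetic used by the model tester above pads the
# sequence length up to the next multiple of `attention_window`, e.g.:
seq_length, attention_window = 7, 4
encoder_seq_length = seq_length + (attention_window - seq_length % attention_window) % attention_window
assert encoder_seq_length == 8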
47
1
'''simple docstring''' import argparse import os import numpy as np import tensorflow as tf import torch from transformers import BertModel def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : Any = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''') _snake_case : Dict = ( ('''layer.''', '''layer_'''), ('''word_embeddings.weight''', '''word_embeddings'''), ('''position_embeddings.weight''', '''position_embeddings'''), ('''token_type_embeddings.weight''', '''token_type_embeddings'''), ('''.''', '''/'''), ('''LayerNorm/weight''', '''LayerNorm/gamma'''), ('''LayerNorm/bias''', '''LayerNorm/beta'''), ('''weight''', '''kernel'''), ) if not os.path.isdir(lowerCAmelCase_ ): os.makedirs(lowerCAmelCase_ ) _snake_case : Tuple = model.state_dict() def to_tf_var_name(lowerCAmelCase_ ): for patt, repl in iter(lowerCAmelCase_ ): _snake_case : List[Any] = name.replace(lowerCAmelCase_ , lowerCAmelCase_ ) return f'''bert/{name}''' def create_tf_var(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case : Optional[int] = tf.dtypes.as_dtype(tensor.dtype ) _snake_case : Any = tf.get_variable(dtype=lowerCAmelCase_ , shape=tensor.shape , name=lowerCAmelCase_ , initializer=tf.zeros_initializer() ) session.run(tf.variables_initializer([tf_var] ) ) session.run(lowerCAmelCase_ ) return tf_var tf.reset_default_graph() with tf.Session() as session: for var_name in state_dict: _snake_case : Dict = to_tf_var_name(lowerCAmelCase_ ) _snake_case : Tuple = state_dict[var_name].numpy() if any(x in var_name for x in tensors_to_transpose ): _snake_case : Any = torch_tensor.T _snake_case : Dict = create_tf_var(tensor=lowerCAmelCase_ , name=lowerCAmelCase_ , session=lowerCAmelCase_ ) tf.keras.backend.set_value(lowerCAmelCase_ , lowerCAmelCase_ ) _snake_case : Optional[Any] = session.run(lowerCAmelCase_ ) print(f'''Successfully created {tf_name}: {np.allclose(lowerCAmelCase_ , lowerCAmelCase_ )}''' ) _snake_case : Optional[Any] = tf.train.Saver(tf.trainable_variables() ) saver.save(lowerCAmelCase_ , os.path.join(lowerCAmelCase_ , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) ) def _a ( lowerCAmelCase_=None ): """simple docstring""" _snake_case : Tuple = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''model name e.g. bert-base-uncased''' ) parser.add_argument( '''--cache_dir''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Directory containing pytorch model''' ) parser.add_argument('''--pytorch_model_path''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''/path/to/<pytorch-model-name>.bin''' ) parser.add_argument('''--tf_cache_dir''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Directory in which to save tensorflow model''' ) _snake_case : Any = parser.parse_args(lowerCAmelCase_ ) _snake_case : Tuple = BertModel.from_pretrained( pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , ) convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase_ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name ) if __name__ == "__main__": main()
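# Example invocation (the flag names come from the argparse definitions
# above; the script filename is hypothetical):
#
#     python convert_bert_pytorch_checkpoint_to_tf.py \
#         --model_name bert-base-uncased \
#         --pytorch_model_path ./pytorch_model.bin \
#         --tf_cache_dir ./tf_checkpoint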
47
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Any = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } UpperCAmelCase : Optional[Any] = { 'gpt-neox-20b': 2_0_4_8, } class lowerCamelCase (a__ ): _lowercase : Optional[int] = VOCAB_FILES_NAMES _lowercase : str = PRETRAINED_VOCAB_FILES_MAP _lowercase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : Optional[int] = ["""input_ids""", """attention_mask"""] def __init__( self , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__="<|endoftext|>" , lowercase__=False , **lowercase__ , ) -> List[Any]: """simple docstring""" super().__init__( lowercase__ , lowercase__ , tokenizer_file=lowercase__ , unk_token=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , add_prefix_space=lowercase__ , **lowercase__ , ) _snake_case : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowercase__ ) != add_prefix_space: _snake_case : int = getattr(lowercase__ , pre_tok_state.pop('''type''' ) ) _snake_case : int = add_prefix_space _snake_case : Optional[Any] = pre_tok_class(**lowercase__ ) _snake_case : List[str] = add_prefix_space def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" _snake_case : Optional[int] = self._tokenizer.model.save(lowercase__ , name=lowercase__ ) return tuple(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> List[int]: """simple docstring""" _snake_case : List[str] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowercase__ , add_special_tokens=lowercase__ ) + [self.eos_token_id] ) if len(lowercase__ ) > self.model_max_length: _snake_case : Dict = input_ids[-self.model_max_length :] return input_ids
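# Illustrative usage (upstream this class is transformers'
# `GPTNeoXTokenizerFast`; the placeholder class/method names above differ):
#
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tok("Hello world").input_ids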
47
1
"""Registry of packaged dataset modules and the extension-to-module mapping."""
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    """Hash source lines, ignoring comments and blank lines."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
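# Because _hash_python_lines drops comment-only and blank lines before
# hashing, two sources that differ only in such lines share one cache hash:
assert _hash_python_lines(["# a comment", "", "x = 1"]) == _hash_python_lines(["x = 1"])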
47
"""Gamma function for positive reals via numerical integration (scipy quad)."""
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Return Gamma(num), the integral of x^(num - 1) * e^(-x) from 0 to infinity."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
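# Quick checks (illustrative): Gamma(n) = (n - 1)! for positive integers, so
# gamma(5.0) should be ~24, and Gamma(1) = 1 exactly.
assert abs(gamma(5.0) - 24.0) < 1e-4
assert abs(gamma(1.0) - 1.0) < 1e-6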
47
1
'''simple docstring''' import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" def run_func(lowerCAmelCase_ ): @wraps(lowerCAmelCase_ ) def run_in_eager_mode(*lowerCAmelCase_ , **lowerCAmelCase_ ): return func(*lowerCAmelCase_ , **lowerCAmelCase_ ) @wraps(lowerCAmelCase_ ) @tf.function(experimental_compile=lowerCAmelCase_ ) def run_in_graph_mode(*lowerCAmelCase_ , **lowerCAmelCase_ ): return func(*lowerCAmelCase_ , **lowerCAmelCase_ ) if do_eager_mode is True: if use_xla is not False: raise ValueError( '''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' ) return run_in_eager_mode else: return run_in_graph_mode return run_func def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" _snake_case : List[Any] = random.Random() _snake_case : Union[str, Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(lowerCAmelCase_ , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowerCamelCase (a__ ): _lowercase : TensorFlowBenchmarkArguments _lowercase : PretrainedConfig _lowercase : str = "TensorFlow" @property def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" return tf.__version__ def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> float: """simple docstring""" _snake_case : Optional[Any] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) _snake_case : Optional[Any] = self._prepare_inference_func(lowercase__ , lowercase__ , lowercase__ ) return self._measure_speed(_inference ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> float: """simple docstring""" _snake_case : Dict = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) _snake_case : Tuple = self._prepare_train_func(lowercase__ , lowercase__ , lowercase__ ) return self._measure_speed(_train ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> [Memory, Optional[MemorySummary]]: """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase__ ) _snake_case : Optional[int] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) _snake_case : List[Any] = self._prepare_inference_func(lowercase__ , lowercase__ , lowercase__ ) return self._measure_memory(_inference ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> [Memory, Optional[MemorySummary]]: """simple docstring""" if self.args.is_gpu: 
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowercase__ ) _snake_case : Union[str, Any] = self.args.strategy if strategy is None: raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' ) _snake_case : List[str] = self._prepare_train_func(lowercase__ , lowercase__ , lowercase__ ) return self._measure_memory(_train ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Callable[[], None]: """simple docstring""" _snake_case : Optional[int] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) _snake_case : List[Any] = ( hasattr(lowercase__ , '''architectures''' ) and isinstance(config.architectures , lowercase__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : int = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : Dict = __import__('''transformers''' , fromlist=[model_class] ) _snake_case : List[Any] = getattr(lowercase__ , lowercase__ ) _snake_case : Tuple = model_cls(lowercase__ ) except ImportError: raise ImportError( F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: _snake_case : Union[str, Any] = TF_MODEL_MAPPING[config.__class__](lowercase__ ) # encoder-decoder has vocab size saved differently _snake_case : Optional[Any] = config.vocab_size if hasattr(lowercase__ , '''vocab_size''' ) else config.encoder.vocab_size _snake_case : List[str] = random_input_ids(lowercase__ , lowercase__ , lowercase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(lowercase__ , decoder_input_ids=lowercase__ , training=lowercase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(lowercase__ , training=lowercase__ ) _snake_case : List[Any] = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> Callable[[], None]: """simple docstring""" _snake_case : int = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' ) if self.args.fpaa: raise NotImplementedError('''Mixed precision is currently not supported.''' ) _snake_case : List[str] = ( hasattr(lowercase__ , '''architectures''' ) and isinstance(config.architectures , lowercase__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _snake_case : Dict = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model _snake_case : List[Any] = __import__('''transformers''' , fromlist=[model_class] ) _snake_case : str = getattr(lowercase__ , lowercase__ ) _snake_case : Optional[int] = model_cls(lowercase__ ) except ImportError: raise ImportError( F'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' ''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' ) else: _snake_case : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowercase__ ) # encoder-decoder has vocab size saved differently _snake_case : Union[str, Any] = config.vocab_size if hasattr(lowercase__ , '''vocab_size''' ) else config.encoder.vocab_size _snake_case : Any = random_input_ids(lowercase__ , lowercase__ , lowercase__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): _snake_case : Optional[int] = model(lowercase__ , decoder_input_ids=lowercase__ , labels=lowercase__ , training=lowercase__ )[0] _snake_case : List[str] = tf.gradients(lowercase__ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): _snake_case : List[str] = model(lowercase__ , labels=lowercase__ , training=lowercase__ )[0] _snake_case : Optional[int] = tf.gradients(lowercase__ , model.trainable_variables ) return gradients _snake_case : List[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def UpperCAmelCase_ ( self , lowercase__ ) -> float: """simple docstring""" with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' ) timeit.repeat(lowercase__ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _snake_case : Tuple = timeit.repeat( lowercase__ , repeat=self.args.repeat , number=10 , ) return min(lowercase__ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(F'''Doesn\'t fit on GPU. {e}''' ) def UpperCAmelCase_ ( self , lowercase__ ) -> [Memory, MemorySummary]: """simple docstring""" logger.info( '''Note that TensorFlow allocates more memory than ''' '''it might need to speed up computation. ''' '''The memory reported here corresponds to the memory ''' '''reported by `nvidia-smi`, which can vary depending ''' '''on total available memory on the GPU that is used.''' ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( '''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory''' ''' consumption line by line.''' ) _snake_case : List[Any] = start_memory_tracing('''transformers''' ) if self.args.is_tpu: # tpu raise NotImplementedError( '''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking''' ''' with `args.memory=False`''' ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( '''py3nvml not installed, we won\'t log GPU memory usage. ''' '''Install py3nvml (pip install py3nvml) to log information about GPU.''' ) _snake_case : str = '''N/A''' else: logger.info( '''Measuring total GPU usage on GPU device. 
Make sure to not have additional processes''' ''' running on the same GPU.''' ) # init nvml nvml.nvmlInit() func() _snake_case : Optional[int] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) _snake_case : Tuple = nvml.nvmlDeviceGetMemoryInfo(lowercase__ ) _snake_case : Any = meminfo.used _snake_case : str = Memory(lowercase__ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( '''When enabling line by line tracing, the max peak memory for CPU is inaccurate in''' ''' TensorFlow.''' ) _snake_case : int = None else: _snake_case : Optional[int] = measure_peak_memory_cpu(lowercase__ ) _snake_case : List[str] = Memory(lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else memory_bytes if self.args.trace_memory_line_by_line: _snake_case : Optional[int] = stop_memory_tracing(lowercase__ ) if memory is None: _snake_case : Any = summary.total else: _snake_case : Dict = None return memory, summary except ResourceExhaustedError as e: self.print_fn(F'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
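For reference, the inference and train closures in this record are built through a small decorator factory that either leaves a function in eager mode or traces it with `tf.function` (optionally XLA-compiled). A minimal self-contained sketch of that pattern (editor's addition; the class-level decorators reference it as `run_with_tf_optimizations`, while the module-level definition above carries an obfuscated name):

    import tensorflow as tf

    def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
        def decorator(func):
            if do_eager_mode:
                if use_xla:
                    # mirrors the ValueError raised in the record above
                    raise ValueError("Cannot run model in XLA if eager_mode is set to True.")
                return func  # plain eager execution
            # trace to a graph; experimental_compile=True additionally enables XLA
            return tf.function(experimental_compile=use_xla)(func)
        return decorator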
47
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase (unittest.TestCase ): @slow def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Union[str, Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModel.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = AutoModel.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Optional[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = TFAutoModelForPreTraining.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = AutoModelForPreTraining.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Union[str, Any]: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : Optional[int] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[Any] = TFAutoModelForCausalLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : Tuple = TFAutoModelForCausalLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[Any] = AutoModelForCausalLM.from_pretrained( 
lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[Any] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelWithLMHead.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelWithLMHead.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = TFAutoModelForMaskedLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : int = AutoModelForMaskedLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Optional[int] = AutoModelForMaskedLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> List[str]: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case : List[str] = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_pt=lowercase__ ) _snake_case , _snake_case : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : List[str] = AutoModelForSeqaSeqLM.from_pretrained(lowercase__ , from_tf=lowercase__ ) _snake_case , _snake_case : Dict = AutoModelForSeqaSeqLM.from_pretrained( lowercase__ , output_loading_info=lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : Any = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Any = TFAutoModelForSequenceClassification.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Dict = AutoModelForSequenceClassification.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) @slow def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" for model_name in ["bert-base-uncased"]: _snake_case : str = AutoConfig.from_pretrained(lowercase__ ) self.assertIsNotNone(lowercase__ ) 
self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : str = TFAutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_pt=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) _snake_case : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(lowercase__ , from_tf=lowercase__ ) self.assertIsNotNone(lowercase__ ) self.assertIsInstance(lowercase__ , lowercase__ ) def test_from_pretrained_identifier( self ) -> None: """simple docstring""" model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True ) self.assertIsInstance(model , TFBertForMaskedLM ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 ) model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True ) self.assertIsInstance(model , BertForMaskedLM ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 ) def test_from_identifier_from_model_type( self ) -> None: """simple docstring""" model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True ) self.assertIsInstance(model , TFRobertaForMaskedLM ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 ) model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True ) self.assertIsInstance(model , RobertaForMaskedLM ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=True ) , 14_410 )
47
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_nllb import NllbTokenizer else: UpperCAmelCase : Any = None UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[Any] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : List[str] = { 'vocab_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/nllb-200-distilled-600M': ( 'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json' ), }, } UpperCAmelCase : int = { 'facebook/nllb-large-en-ro': 1_0_2_4, 'facebook/nllb-200-distilled-600M': 1_0_2_4, } # fmt: off UpperCAmelCase : Optional[int] = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn'] class lowerCamelCase (a__ ): _lowercase : 
Dict = VOCAB_FILES_NAMES _lowercase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP _lowercase : Optional[Any] = ["""input_ids""", """attention_mask"""] _lowercase : List[Any] = NllbTokenizer _lowercase : List[int] = [] _lowercase : List[int] = [] def __init__( self , lowercase__=None , lowercase__=None , lowercase__="<s>" , lowercase__="</s>" , lowercase__="</s>" , lowercase__="<s>" , lowercase__="<unk>" , lowercase__="<pad>" , lowercase__="<mask>" , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=False , **lowercase__ , ) -> Tuple: """simple docstring""" _snake_case : List[Any] = AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token _snake_case : Union[str, Any] = legacy_behaviour super().__init__( vocab_file=lowercase__ , tokenizer_file=lowercase__ , bos_token=lowercase__ , eos_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , unk_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , legacy_behaviour=lowercase__ , **lowercase__ , ) _snake_case : Tuple = vocab_file _snake_case : int = False if not self.vocab_file else True _snake_case : Tuple = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. _additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} ) _snake_case : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(lowercase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } _snake_case : List[str] = src_lang if src_lang is not None else '''eng_Latn''' _snake_case : List[str] = self.convert_tokens_to_ids(self._src_lang ) _snake_case : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def UpperCAmelCase_ ( self ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : str = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> List[int]: """simple docstring""" _snake_case : List[str] = [self.sep_token_id] _snake_case : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , **lowercase__ ) -> Any: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' ) _snake_case : List[str] = src_lang _snake_case : Optional[Any] = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ ) _snake_case : str = self.convert_tokens_to_ids(lowercase__ ) _snake_case : str = tgt_lang_id return inputs def UpperCAmelCase_ ( self , lowercase__ , 
lowercase__ = "eng_Latn" , lowercase__ = None , lowercase__ = "fra_Latn" , **lowercase__ , ) -> BatchEncoding: """simple docstring""" _snake_case : Optional[Any] = src_lang _snake_case : Tuple = tgt_lang return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : List[Any] = self.convert_tokens_to_ids(lowercase__ ) if self.legacy_behaviour: _snake_case : Any = [] _snake_case : Optional[int] = [self.eos_token_id, self.cur_lang_code] else: _snake_case : int = [self.cur_lang_code] _snake_case : List[Any] = [self.eos_token_id] _snake_case : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens ) _snake_case : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) _snake_case : Dict = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : str = self.convert_tokens_to_ids(lowercase__ ) if self.legacy_behaviour: _snake_case : List[str] = [] _snake_case : Optional[int] = [self.eos_token_id, self.cur_lang_code] else: _snake_case : int = [self.cur_lang_code] _snake_case : str = [self.eos_token_id] _snake_case : Any = self.convert_ids_to_tokens(self.prefix_tokens ) _snake_case : int = self.convert_ids_to_tokens(self.suffix_tokens ) _snake_case : Dict = processors.TemplateProcessing( single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(lowercase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' ) return _snake_case : Optional[int] = os.path.join( lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ): copyfile(self.vocab_file , lowercase__ ) return (out_vocab_file,)
47
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : Dict = {'configuration_timm_backbone': ['TimmBackboneConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = ['TimmBackbone'] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
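The init file above defers all heavy imports through `_LazyModule`. A stripped-down sketch of how such a lazy module can work (editor's assumption about the mechanism, not the library's actual implementation):

    import importlib
    import types

    class LazyModule(types.ModuleType):
        # Resolves attributes by importing the owning submodule on first access.
        def __init__(self, name: str, import_structure: dict):
            super().__init__(name)
            self._import_structure = import_structure

        def __getattr__(self, attr: str):
            for submodule, symbols in self._import_structure.items():
                if attr in symbols:
                    module = importlib.import_module(f".{submodule}", self.__name__)
                    return getattr(module, attr)
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")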
47
1
'''simple docstring'''
from __future__ import annotations

from fractions import Fraction


def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if naively cancelling the shared digit leaves the fraction's value unchanged."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )


def fraction_list(digit_len: int) -> list[str]:
    """Collect all non-trivial digit-cancelling fractions with `digit_len`-digit numerators."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions


def solution(digit_len: int = 2) -> int:
    """Return the denominator of the product of all such fractions, in lowest terms."""
    result = 1.0
    for fraction in fraction_list(digit_len):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)


if __name__ == "__main__":
    print(solution())
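For the reconstruction above, the expected outputs are fixed by the Project Euler 33 statement (editor's note):

    # fraction_list(2) -> ["16/64", "19/95", "26/65", "49/98"]
    # solution()       -> 100  (denominator of the product of the four fractions in lowest terms)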
47
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version UpperCAmelCase : Tuple = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') UpperCAmelCase : str = { 'base': AutoModel, 'sequence-classification': AutoModelForSequenceClassification, 'question-answering': AutoModelForQuestionAnswering, 'pretraining': AutoModelForPreTraining, 'token-classification': AutoModelForTokenClassification, 'language-modeling': AutoModelWithLMHead, 'summarization': AutoModelForSeqaSeqLM, 'translation': AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization UpperCAmelCase : Optional[Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } UpperCAmelCase : Tuple = sorted(arg_to_scheduler.keys()) UpperCAmelCase : Optional[Any] = '{' + ', '.join(arg_to_scheduler_choices) + '}' class lowerCamelCase (pl.LightningModule ): def __init__( self , lowercase__ , lowercase__=None , lowercase__="base" , lowercase__=None , lowercase__=None , lowercase__=None , **lowercase__ , ) -> Optional[int]: """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowercase__ ) _snake_case : Union[str, Any] = 0 _snake_case : int = Path(self.hparams.output_dir ) _snake_case : int = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _snake_case : Tuple = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=lowercase__ , **lowercase__ , ) else: _snake_case : PretrainedConfig = config _snake_case : Optional[Any] = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''') for p in extra_model_params: if getattr(self.hparams , lowercase__ , lowercase__ ): assert hasattr(self.config , lowercase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowercase__ , getattr(self.hparams , lowercase__ ) ) if tokenizer is None: _snake_case : Optional[int] = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowercase__ , ) else: _snake_case : PreTrainedTokenizer = tokenizer _snake_case : Any = MODEL_MODES[mode] if model is None: _snake_case : List[Any] = self.model_type.from_pretrained( self.hparams.model_name_or_path , 
from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowercase__ , ) else: _snake_case : Optional[Any] = model def UpperCAmelCase_ ( self , *lowercase__ , **lowercase__ ) -> List[str]: """simple docstring""" _snake_case : Dict = self.model_type.from_pretrained(*lowercase__ , **lowercase__ ) def UpperCAmelCase_ ( self ) -> List[Any]: """simple docstring""" _snake_case : Optional[int] = arg_to_scheduler[self.hparams.lr_scheduler] _snake_case : Optional[int] = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _snake_case : str = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1} return scheduler def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = self.model _snake_case : List[Any] = ['''bias''', '''LayerNorm.weight'''] _snake_case : List[str] = [ { '''params''': [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters '''weight_decay''': self.hparams.weight_decay, }, { '''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] if self.hparams.adafactor: _snake_case : Any = Adafactor( lowercase__ , lr=self.hparams.learning_rate , scale_parameter=lowercase__ , relative_step=lowercase__ ) else: _snake_case : List[str] = AdamW( lowercase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _snake_case : List[str] = optimizer _snake_case : Any = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" return self.validation_step(lowercase__ , lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" return self.validation_end(lowercase__ ) def UpperCAmelCase_ ( self ) -> int: """simple docstring""" _snake_case : Any = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _snake_case : Optional[int] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCAmelCase_ ( self , lowercase__ ) -> Any: """simple docstring""" if stage == "test": _snake_case : Any = len(self.test_dataloader().dataset ) else: _snake_case : Dict = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=lowercase__ ) _snake_case : Optional[int] = len(self.train_dataloader().dataset ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ = False ) -> str: """simple docstring""" raise NotImplementedError('''You must implement this for your task''' ) def UpperCAmelCase_ ( self ) -> Optional[int]: """simple docstring""" return self.train_loader def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self ) -> Optional[Any]: """simple docstring""" return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ ) -> Optional[int]: """simple docstring""" return os.path.join( self.hparams.data_dir , '''cached_{}_{}_{}'''.format( lowercase__ , list(filter(lowercase__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCAmelCase_ ( self , lowercase__ ) -> None: """simple docstring""" _snake_case : Dict = 
self.output_dir.joinpath('''best_tfmr''' ) _snake_case : Tuple = self.step_count self.model.save_pretrained(lowercase__ ) self.tokenizer.save_pretrained(lowercase__ ) @staticmethod def UpperCAmelCase_ ( lowercase__ , lowercase__ ) -> Tuple: """simple docstring""" parser.add_argument( '''--model_name_or_path''' , default=lowercase__ , type=lowercase__ , required=lowercase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--config_name''' , default='''''' , type=lowercase__ , help='''Pretrained config name or path if not the same as model_name''' ) parser.add_argument( '''--tokenizer_name''' , default=lowercase__ , type=lowercase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , ) parser.add_argument( '''--cache_dir''' , default=str(Path(lowercase__ ).parent / '''test_run''' / '''cache''' ) , type=lowercase__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , ) parser.add_argument( '''--encoder_layerdrop''' , type=lowercase__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--decoder_layerdrop''' , type=lowercase__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--dropout''' , type=lowercase__ , help='''Dropout probability (Optional). Goes into model.config''' , ) parser.add_argument( '''--attention_dropout''' , type=lowercase__ , help='''Attention dropout probability (Optional). Goes into model.config''' , ) parser.add_argument('''--learning_rate''' , default=5E-5 , type=lowercase__ , help='''The initial learning rate for Adam.''' ) parser.add_argument( '''--lr_scheduler''' , default='''linear''' , choices=lowercase__ , metavar=lowercase__ , type=lowercase__ , help='''Learning rate scheduler''' , ) parser.add_argument('''--weight_decay''' , default=0.0 , type=lowercase__ , help='''Weight decay if we apply some.''' ) parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowercase__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--warmup_steps''' , default=0 , type=lowercase__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--num_workers''' , default=4 , type=lowercase__ , help='''kwarg passed to DataLoader''' ) parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=lowercase__ ) parser.add_argument('''--train_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--eval_batch_size''' , default=32 , type=lowercase__ ) parser.add_argument('''--adafactor''' , action='''store_true''' ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> str: """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowercase__ ) class lowerCamelCase (pl.Callback ): def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Any: """simple docstring""" _snake_case : Any = trainer.lr_schedulers[0]['''scheduler'''] _snake_case : Optional[int] = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowercase__ ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> List[str]: """simple docstring""" rank_zero_info('''***** Validation results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log results for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" rank_zero_info('''***** Test results *****''' ) _snake_case : Dict = trainer.callback_metrics # Log and save results to file _snake_case : str = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' ) with open(lowercase__ , '''w''' ) as writer: for key in sorted(lowercase__ ): if key not in ["log", "progress_bar"]: rank_zero_info('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) writer.write('''{} = {}\n'''.format(lowercase__ , str(metrics[key] ) ) ) def _a ( lowerCAmelCase_ , lowerCAmelCase_ ): """simple docstring""" parser.add_argument( '''--output_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument( '''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , ) parser.add_argument( '''--fp16_opt_level''' , type=lowerCAmelCase_ , default='''O2''' , help=( '''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].''' '''See details at https://nvidia.github.io/apex/amp.html''' ) , ) parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase_ ) parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase_ , help='''Max gradient norm''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' ) parser.add_argument( '''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--seed''' , type=lowerCAmelCase_ , default=42 , help='''random seed for initialization''' ) parser.add_argument( '''--data_dir''' , default=str(Path(lowerCAmelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase_ , help='''The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.''' , ) def generic_train( model , args , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs , ): """simple docstring""" pl.seed_everything(args.seed ) # init model odir = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=True ) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback ) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} if args.fp16: train_params['''precision'''] = 16 if args.gpus > 1: train_params['''accelerator'''] = '''auto''' train_params['''strategy'''] = '''ddp''' train_params['''accumulate_grad_batches'''] = args.accumulate_grad_batches train_params['''profiler'''] = None train_params['''devices'''] = '''auto''' trainer = pl.Trainer.from_argparse_args( args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , ) if args.do_train: trainer.fit(model ) else: print('''RAG modeling tests with new set functions successfully executed!''' ) return trainer
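A hedged sketch of how the restored `generic_train` helper is typically driven (editor's addition; `add_generic_args` refers to the argparse helper defined earlier in this record under an obfuscated name, `add_model_specific_args` is the static method on the LightningModule, and `MyTransformer` is a hypothetical subclass of it):

    # parser = argparse.ArgumentParser()
    # add_generic_args(parser, os.getcwd())
    # MyTransformer.add_model_specific_args(parser, os.getcwd())
    # args = parser.parse_args()
    # model = MyTransformer(args)
    # trainer = generic_train(model, args)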
47
1
'''simple docstring'''
import math


def res(x, y):
    """Return a value that orders x^y without computing the power itself."""
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
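A short note on why the log trick matters (editor's addition): comparing logarithms sidesteps constructing astronomically large integers, since only two small floats are compared.

    # 5^(10^6) vs 7^(10^5): the powers themselves have hundreds of thousands of digits,
    # but their base-10 logs are tiny floats that compare instantly:
    # res(5, 10**6) -> ~698970.0 ; res(7, 10**5) -> ~84509.8, so 5^(10^6) is larger.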
700
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Dict = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class lowerCamelCase (a__ ): _lowercase : List[str] = """sew-d""" def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__=2 , lowercase__=512 , lowercase__=256 , lowercase__=True , lowercase__=True , lowercase__=("p2c", "c2p") , lowercase__="layer_norm" , lowercase__="gelu_python" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.02 , lowercase__=1E-7 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowercase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowercase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=True , lowercase__=0.05 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__="mean" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=0 , lowercase__=1 , lowercase__=2 , **lowercase__ , ) -> Dict: """simple docstring""" super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ ) _snake_case : List[str] = hidden_size _snake_case : Optional[Any] = feat_extract_norm _snake_case : Tuple = feat_extract_activation _snake_case : Tuple = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = list(lowercase__ ) _snake_case : Any = conv_bias _snake_case : List[Any] = num_conv_pos_embeddings _snake_case : Any = num_conv_pos_embedding_groups _snake_case : Union[str, Any] = len(self.conv_dim ) _snake_case : Optional[Any] = num_hidden_layers _snake_case : Optional[int] = intermediate_size _snake_case : Any = squeeze_factor _snake_case : Optional[Any] = max_position_embeddings _snake_case : Tuple = position_buckets _snake_case : Tuple = share_att_key _snake_case : Any = relative_attention _snake_case : Optional[int] = norm_rel_ebd _snake_case : Optional[Any] = list(lowercase__ ) _snake_case : List[Any] = hidden_act _snake_case : List[Any] = num_attention_heads _snake_case : Dict = hidden_dropout _snake_case : Tuple = attention_dropout _snake_case : Union[str, Any] = activation_dropout _snake_case : List[Any] = feat_proj_dropout _snake_case : Optional[int] = final_dropout _snake_case : Optional[Any] = layer_norm_eps _snake_case : Dict = feature_layer_norm_eps _snake_case : List[Any] = initializer_range _snake_case : Dict = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _snake_case : Union[str, Any] = apply_spec_augment 
_snake_case : Any = mask_time_prob _snake_case : List[str] = mask_time_length _snake_case : Dict = mask_time_min_masks _snake_case : Union[str, Any] = mask_feature_prob _snake_case : Tuple = mask_feature_length _snake_case : Union[str, Any] = mask_feature_min_masks # ctc loss _snake_case : Optional[Any] = ctc_loss_reduction _snake_case : Optional[Any] = ctc_zero_infinity # sequence classification _snake_case : List[Any] = use_weighted_layer_sum _snake_case : Any = classifier_proj_size @property def UpperCAmelCase_ ( self ) -> Any: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
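One derived quantity is worth spelling out: the final property above multiplies the conv strides together, which is the model's total input-to-output downsampling (editor's note; in the transformers library this property is exposed as `inputs_to_logits_ratio`, and the class shown here under an obfuscated name is `SEWDConfig`):

    # default conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    # functools.reduce(operator.mul, conv_stride, 1) == 320
    # i.e. one feature frame per 320 audio samples, or 20 ms at a 16 kHz sampling rate.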
47
0