Dataset schema (column: type, value range):
- code: string, lengths 81 to 54k
- code_codestyle: int64, 0 to 721
- style_context: string, lengths 91 to 41.9k
- style_context_codestyle: int64, 0 to 699
- label: int64, 0 to 1
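Each row pairs a code sample with a style-context sample, each tagged with an integer style id, plus a binary label. As a rough illustration only, the sketch below shows how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library; the repository id is a placeholder, and the comments on what each column means are assumptions rather than documented semantics.

from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset location.
ds = load_dataset("username/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])               # first code sample (string)
print(row["code_codestyle"])           # integer style id for that sample
print(row["style_context"][:200])      # second code sample providing style context
print(row["style_context_codestyle"])  # integer style id for the context sample
print(row["label"])                    # binary label (0 or 1); exact meaning depends on the task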
"""Divide-and-conquer search for the maximum of a list slice."""
from __future__ import annotations


def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """Return the largest element of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
707
"""simple docstring""" from importlib import import_module from .logging import get_logger _a = get_logger(__name__) class _UpperCAmelCase: def __init__( self , __a , __a=None) -> Dict: '''simple docstring''' _UpperCamelCase = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__'''): setattr(self , __a , getattr(__a , __a)) _UpperCamelCase = module._original_module if isinstance(__a , _PatchedModuleObj) else module class _UpperCAmelCase: lowercase__ = [] def __init__( self , __a , __a , __a , __a=None) -> List[str]: '''simple docstring''' _UpperCamelCase = obj _UpperCamelCase = target _UpperCamelCase = new _UpperCamelCase = target.split('''.''')[0] _UpperCamelCase = {} _UpperCamelCase = attrs or [] def __enter__( self) -> int: '''simple docstring''' *_UpperCamelCase , _UpperCamelCase = self.target.split('''.''') # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__a)): try: _UpperCamelCase = import_module('''.'''.join(submodules[: i + 1])) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _UpperCamelCase = getattr(self.obj , __a) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__a , _PatchedModuleObj) and obj_attr._original_module is submodule) ): _UpperCamelCase = obj_attr # patch at top level setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs)) _UpperCamelCase = getattr(self.obj , __a) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a) , attrs=self.attrs)) _UpperCamelCase = getattr(__a , __a) # finally set the target attribute setattr(__a , __a , self.new) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _UpperCamelCase = getattr(import_module('''.'''.join(__a)) , __a) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , __a) is attr_value: _UpperCamelCase = getattr(self.obj , __a) setattr(self.obj , __a , self.new) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _UpperCamelCase = globals()['''__builtins__'''][target_attr] setattr(self.obj , __a , self.new) else: raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''') def __exit__( self , *__a) -> Tuple: '''simple docstring''' for attr in list(self.original): setattr(self.obj , __a , self.original.pop(__a)) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' self.__enter__() self._active_patches.append(self) def UpperCAmelCase ( self) -> str: '''simple docstring''' try: self._active_patches.remove(self) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
78
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import XLMRobertaTokenizer from diffusers import ( AltDiffusionImgaImgPipeline, AutoencoderKL, PNDMScheduler, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _UpperCAmelCase( unittest.TestCase ): def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = 1 _UpperCamelCase = 3 _UpperCamelCase = (32, 32) _UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0)).to(__a) return image @property def UpperCAmelCase ( self) -> str: '''simple docstring''' torch.manual_seed(0) _UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0) _UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def UpperCAmelCase ( self) -> str: '''simple docstring''' torch.manual_seed(0) _UpperCamelCase = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , ) return RobertaSeriesModelWithTransformation(__a) @property def UpperCAmelCase ( self) -> str: '''simple docstring''' def extract(*__a , **__a): class _UpperCAmelCase: def __init__( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = torch.ones([0]) def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' self.pixel_values.to(__a) return self return Out() return extract def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.dummy_cond_unet _UpperCamelCase = PNDMScheduler(skip_prk_steps=__a) _UpperCamelCase = self.dummy_vae _UpperCamelCase = self.dummy_text_encoder _UpperCamelCase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''') _UpperCamelCase = 77 _UpperCamelCase = self.dummy_image.to(__a) _UpperCamelCase = init_image / 2 + 0.5 # make sure here that pndm scheduler skips prk _UpperCamelCase = AltDiffusionImgaImgPipeline( unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , ) _UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a) _UpperCamelCase = alt_pipe.to(__a) alt_pipe.set_progress_bar_config(disable=__a) _UpperCamelCase = '''A painting of a squirrel eating a burger''' _UpperCamelCase = 
torch.Generator(device=__a).manual_seed(0) _UpperCamelCase = alt_pipe( [prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__a , ) _UpperCamelCase = output.images _UpperCamelCase = torch.Generator(device=__a).manual_seed(0) _UpperCamelCase = alt_pipe( [prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__a , return_dict=__a , )[0] _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCamelCase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3 @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''') def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = self.dummy_cond_unet _UpperCamelCase = PNDMScheduler(skip_prk_steps=__a) _UpperCamelCase = self.dummy_vae _UpperCamelCase = self.dummy_text_encoder _UpperCamelCase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''') _UpperCamelCase = 77 _UpperCamelCase = self.dummy_image.to(__a) # put models in fp16 _UpperCamelCase = unet.half() _UpperCamelCase = vae.half() _UpperCamelCase = bert.half() # make sure here that pndm scheduler skips prk _UpperCamelCase = AltDiffusionImgaImgPipeline( unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , ) _UpperCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a) _UpperCamelCase = alt_pipe.to(__a) alt_pipe.set_progress_bar_config(disable=__a) _UpperCamelCase = '''A painting of a squirrel eating a burger''' _UpperCamelCase = torch.manual_seed(0) _UpperCamelCase = alt_pipe( [prompt] , generator=__a , num_inference_steps=2 , output_type='''np''' , image=__a , ).images assert image.shape == (1, 32, 32, 3) @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''') def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') # resize to resolution that is divisible by 8 but not 16 or 32 _UpperCamelCase = init_image.resize((7_60, 5_04)) _UpperCamelCase = '''BAAI/AltDiffusion''' _UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained( __a , safety_checker=__a , ) pipe.to(__a) pipe.set_progress_bar_config(disable=__a) pipe.enable_attention_slicing() _UpperCamelCase = '''A fantasy landscape, trending on artstation''' _UpperCamelCase = torch.manual_seed(0) _UpperCamelCase = pipe( prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type='''np''' , ) _UpperCamelCase = output.images[0] _UpperCamelCase = image[2_55:2_58, 3_83:3_86, -1] assert image.shape == (5_04, 7_60, 3) _UpperCamelCase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @slow @require_torch_gpu class _UpperCAmelCase( unittest.TestCase ): def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = load_image( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''') _UpperCamelCase = init_image.resize((7_68, 5_12)) _UpperCamelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''') _UpperCamelCase = '''BAAI/AltDiffusion''' _UpperCamelCase = AltDiffusionImgaImgPipeline.from_pretrained( __a , safety_checker=__a , ) pipe.to(__a) pipe.set_progress_bar_config(disable=__a) pipe.enable_attention_slicing() _UpperCamelCase = '''A fantasy landscape, trending on artstation''' _UpperCamelCase = torch.manual_seed(0) _UpperCamelCase = pipe( prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type='''np''' , ) _UpperCamelCase = output.images[0] assert image.shape == (5_12, 7_68, 3) # img2img is flaky across GPUs even in fp32, so using MAE here assert np.abs(expected_image - image).max() < 1e-2
708
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
78
0
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
709
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging _a = logging.get_logger(__name__) _a = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 'gpt_neo' lowercase__ = ['past_key_values'] lowercase__ = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , __a=5_02_57 , __a=20_48 , __a=20_48 , __a=24 , __a=[[["global", "local"], 12]] , __a=16 , __a=None , __a=2_56 , __a="gelu_new" , __a=0.0 , __a=0.0 , __a=0.0 , __a=0.1 , __a=1e-5 , __a=0.02 , __a=True , __a=5_02_56 , __a=5_02_56 , **__a , ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_layers _UpperCamelCase = num_heads _UpperCamelCase = intermediate_size _UpperCamelCase = window_size _UpperCamelCase = activation_function _UpperCamelCase = resid_dropout _UpperCamelCase = embed_dropout _UpperCamelCase = attention_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = layer_norm_epsilon _UpperCamelCase = initializer_range _UpperCamelCase = use_cache _UpperCamelCase = bos_token_id _UpperCamelCase = eos_token_id _UpperCamelCase = attention_types _UpperCamelCase = self.expand_attention_types_params(__a) if len(self.attention_layers) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''') super().__init__(bos_token_id=__a , eos_token_id=__a , **__a) @staticmethod def UpperCAmelCase ( __a) -> int: '''simple docstring''' _UpperCamelCase = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> str: """simple docstring""" import torch _UpperCamelCase = input.size() _UpperCamelCase = len(__snake_case ) _UpperCamelCase = shape[dimension] _UpperCamelCase = torch.arange(0, __snake_case, __snake_case ) _UpperCamelCase = torch.div(sizedim - size, __snake_case, rounding_mode='''floor''' ) + 1 _UpperCamelCase = torch.arange(__snake_case ) + low_indices[:min_length][:, None] _UpperCamelCase = [slice(__snake_case )] * rank _UpperCamelCase = indices _UpperCamelCase = input[s] _UpperCamelCase = list(range(0, rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> str: """simple docstring""" import torch _UpperCamelCase = torch.arange(1, __snake_case ) _UpperCamelCase = torch.remainder(__snake_case, __snake_case ) _UpperCamelCase = remainders == 0 _UpperCamelCase = candidates[divisor_indices] _UpperCamelCase = torch.max(__snake_case ) return largest_divisor, torch.div(__snake_case, __snake_case, rounding_mode='''floor''' ) class _UpperCAmelCase( lowerCamelCase ): @property def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' _UpperCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}}) if self.use_past: self.fill_with_past_key_values_(__a , direction='''inputs''') _UpperCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _UpperCamelCase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return self._config.num_heads def UpperCAmelCase ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]: '''simple docstring''' _UpperCamelCase = super(__a , self).generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a) # We need to order the input in the way they appears in the forward() _UpperCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch _UpperCamelCase , _UpperCamelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _UpperCamelCase = seqlen + 2 _UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCamelCase = [ (torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers) ] _UpperCamelCase = common_inputs['''attention_mask'''] if self.use_past: _UpperCamelCase = ordered_inputs['''attention_mask'''].dtype _UpperCamelCase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__a , __a , dtype=__a)] , dim=1) return ordered_inputs @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return 13
78
0
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    """
    Determine the missing one of the three carrier concentrations of a
    semiconductor, given the other two, from the relation n * p = n_i ** 2.
    Exactly one of the arguments must be 0 (the unknown quantity).
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    elif hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    elif intrinsic_conc == 0:
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
710
"""simple docstring""" import sys from collections import defaultdict class _UpperCAmelCase: def __init__( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase ( self , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = pos def UpperCAmelCase ( self , __a , __a , __a , __a) -> Tuple: '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCamelCase = 2 * start + 1 else: _UpperCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child] _UpperCamelCase , _UpperCamelCase = ( heap[start], positions[start], ) _UpperCamelCase , _UpperCamelCase = temp, tempa _UpperCamelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , __a) self.top_to_bottom(__a , __a , __a , __a) def UpperCAmelCase ( self , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = position[index] while index != 0: _UpperCamelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCamelCase = heap[parent] _UpperCamelCase = position[parent] self.set_position(position[parent] , __a) else: _UpperCamelCase = val _UpperCamelCase = temp self.set_position(__a , __a) break _UpperCamelCase = parent else: _UpperCamelCase = val _UpperCamelCase = temp self.set_position(__a , 0) def UpperCAmelCase ( self , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = len(__a) // 2 - 1 for i in range(__a , -1 , -1): self.top_to_bottom(__a , __a , len(__a) , __a) def UpperCAmelCase ( self , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = positions[0] _UpperCamelCase = sys.maxsize self.top_to_bottom(__a , 0 , len(__a) , __a) return temp def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = Heap() _UpperCamelCase = [0] * len(__snake_case ) _UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCamelCase = [] for vertex in range(len(__snake_case ) ): distance_tv.append(sys.maxsize ) positions.append(__snake_case ) heap.node_position.append(__snake_case ) _UpperCamelCase = [] _UpperCamelCase = 1 _UpperCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCamelCase = 0 _UpperCamelCase = distance heap.heapify(__snake_case, __snake_case ) for _ in range(1, len(__snake_case ) ): _UpperCamelCase = heap.delete_minimum(__snake_case, __snake_case ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__snake_case )] ): _UpperCamelCase = distance heap.bottom_to_top( __snake_case, heap.get_position(__snake_case ), __snake_case, __snake_case ) _UpperCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _a = int(input("""Enter number of edges: """).strip()) _a = 
defaultdict(list) for _ in range(edges_number): _a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
78
0
import math


def is_prime(number: int) -> bool:
    """Return True if the given non-negative integer is prime."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime at or above factor * value."""
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
711
"""simple docstring""" import json import sys def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" with open(__snake_case, encoding='''utf-8''' ) as f: _UpperCamelCase = json.load(__snake_case ) _UpperCamelCase = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' '''] for benchmark_name in sorted(__snake_case ): _UpperCamelCase = results[benchmark_name] _UpperCamelCase = benchmark_name.split('''/''' )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) _UpperCamelCase = '''| metric |''' _UpperCamelCase = '''|--------|''' _UpperCamelCase = '''| new / old (diff) |''' for metric_name in sorted(__snake_case ): _UpperCamelCase = benchmark_res[metric_name] _UpperCamelCase = metric_vals['''new'''] _UpperCamelCase = metric_vals.get('''old''', __snake_case ) _UpperCamelCase = metric_vals.get('''diff''', __snake_case ) _UpperCamelCase = F''' {new_val:f}''' if isinstance(__snake_case, (int, float) ) else '''None''' if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(__snake_case, (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(__snake_case, (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('''</details>''' ) with open(__snake_case, '''w''', encoding='''utf-8''' ) as f: f.writelines('''\n'''.join(__snake_case ) ) if __name__ == "__main__": _a = sys.argv[1] _a = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
78
0
"""simple docstring""" from math import pi, sqrt, tan def lowerCamelCase__ ( __snake_case ) -> float: """simple docstring""" if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> float: """simple docstring""" if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def lowerCamelCase__ ( __snake_case ) -> float: """simple docstring""" if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def lowerCamelCase__ ( __snake_case ) -> float: """simple docstring""" if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> float: """simple docstring""" if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) _UpperCamelCase = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(__snake_case, 2 ) * torus_radius * tube_radius def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def lowerCamelCase__ ( __snake_case ) -> float: """simple docstring""" if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> float: """simple docstring""" if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) _UpperCamelCase = (sidea + sidea + sidea) / 2 _UpperCamelCase = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) 
return base * height def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> float: """simple docstring""" if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def lowerCamelCase__ ( __snake_case ) -> float: """simple docstring""" if radius < 0: raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if diagonal_a < 0 or diagonal_a < 0: raise ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def lowerCamelCase__ ( __snake_case, __snake_case ) -> float: """simple docstring""" if not isinstance(__snake_case, __snake_case ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("""[DEMO] Areas of various geometric shapes: \n""") print(F"""Rectangle: {area_rectangle(10, 20) = }""") print(F"""Square: {area_square(10) = }""") print(F"""Triangle: {area_triangle(10, 10) = }""") print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""") print(F"""Parallelogram: {area_parallelogram(10, 20) = }""") print(F"""Rhombus: {area_rhombus(10, 20) = }""") print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""") print(F"""Circle: {area_circle(20) = }""") print(F"""Ellipse: {area_ellipse(10, 20) = }""") print("""\nSurface Areas of various geometric shapes: \n""") print(F"""Cube: {surface_area_cube(20) = }""") print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""") print(F"""Sphere: {surface_area_sphere(20) = }""") print(F"""Hemisphere: {surface_area_hemisphere(20) = }""") print(F"""Cone: {surface_area_cone(10, 20) = }""") print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""") print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""") print(F"""Torus: {surface_area_torus(20, 10) = }""") print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""") print(F"""Square: {area_reg_polygon(4, 10) = }""") print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
712
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def lowerCamelCase__ ( __snake_case, __snake_case=False ) -> Tuple: """simple docstring""" _UpperCamelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCamelCase = '''''' else: _UpperCamelCase = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCamelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCamelCase = in_proj_bias[: config.hidden_size] _UpperCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCamelCase = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] _UpperCamelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCamelCase = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = dct.pop(__snake_case ) _UpperCamelCase = val def lowerCamelCase__ ( ) -> Dict: """simple docstring""" _UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _UpperCamelCase = Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = ViTConfig() _UpperCamelCase = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": _UpperCamelCase = True _UpperCamelCase = int(vit_name[-12:-10] ) _UpperCamelCase = int(vit_name[-9:-6] ) else: _UpperCamelCase = 10_00 _UpperCamelCase = '''huggingface/label-files''' _UpperCamelCase = '''imagenet-1k-id2label.json''' _UpperCamelCase = json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type='''dataset''' ), '''r''' ) ) _UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()} _UpperCamelCase = idalabel _UpperCamelCase = {v: k for k, v in idalabel.items()} _UpperCamelCase = int(vit_name[-6:-4] ) _UpperCamelCase = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): _UpperCamelCase = 1_92 _UpperCamelCase = 7_68 _UpperCamelCase = 12 _UpperCamelCase = 3 elif vit_name[9:].startswith('''small''' ): _UpperCamelCase = 3_84 _UpperCamelCase = 15_36 _UpperCamelCase = 12 _UpperCamelCase = 6 else: pass else: if vit_name[4:].startswith('''small''' ): _UpperCamelCase = 7_68 _UpperCamelCase = 23_04 _UpperCamelCase = 8 _UpperCamelCase = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): _UpperCamelCase = 10_24 _UpperCamelCase = 40_96 _UpperCamelCase = 24 _UpperCamelCase = 16 elif vit_name[4:].startswith('''huge''' ): _UpperCamelCase = 12_80 _UpperCamelCase = 51_20 _UpperCamelCase = 32 _UpperCamelCase = 16 # load original model from timm _UpperCamelCase = timm.create_model(__snake_case, pretrained=__snake_case ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCamelCase = timm_model.state_dict() if base_model: remove_classification_head_(__snake_case ) _UpperCamelCase = create_rename_keys(__snake_case, __snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCamelCase = ViTModel(__snake_case ).eval() else: _UpperCamelCase = ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: _UpperCamelCase = DeiTImageProcessor(size=config.image_size ) else: _UpperCamelCase = ViTImageProcessor(size=config.image_size ) _UpperCamelCase = image_processor(images=prepare_img(), return_tensors='''pt''' ) _UpperCamelCase = encoding['''pixel_values'''] _UpperCamelCase = model(__snake_case ) if base_model: _UpperCamelCase = 
timm_model.forward_features(__snake_case ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__snake_case, outputs.pooler_output, atol=1e-3 ) else: _UpperCamelCase = timm_model(__snake_case ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1e-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_patch16_224""", type=str, help="""Name of the ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _a = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
78
0
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a_( unittest.TestCase ): def __init__( self , __a , __a=13 , __a=3 , __a=2_24 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=[0.5, 0.5, 0.5] , __a=[0.5, 0.5, 0.5] , ) -> Tuple: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_normalize _UpperCamelCase = image_mean _UpperCamelCase = image_std def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a_( lowerCamelCase , unittest.TestCase ): lowercase__ = ViTImageProcessor if is_vision_available() else None def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = EfficientFormerImageProcessorTester(self) @property def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , '''image_mean''')) self.assertTrue(hasattr(__a , '''image_std''')) self.assertTrue(hasattr(__a , '''do_normalize''')) self.assertTrue(hasattr(__a , '''do_resize''')) self.assertTrue(hasattr(__a , '''size''')) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' pass def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input _UpperCamelCase = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processor(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input _UpperCamelCase = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, 
self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processor(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_proc_tester , equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input _UpperCamelCase = image_processor(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processor(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size['''height'''], self.image_proc_tester.size['''width'''], ) , )
713
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=16 , __a=36 , __a=6 , __a=6 , __a=6 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = embedding_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_hidden_groups _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = scope def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = 
model(__a , attention_mask=__a , token_type_ids=__a) _UpperCamelCase = model(__a , token_type_ids=__a) _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = AlbertForPreTraining(config=__a) model.to(__a) model.eval() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , sentence_order_label=__a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertForMaskedLM(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = AlbertForQuestionAnswering(config=__a) model.to(__a) model.eval() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = AlbertForSequenceClassification(__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = AlbertForTokenClassification(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.num_choices _UpperCamelCase = AlbertForMultipleChoice(config=__a) model.to(__a) model.eval() _UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': 
input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowercase__ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = True def UpperCAmelCase ( self , __a , __a , __a=False) -> List[str]: '''simple docstring''' _UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class in get_values(__a): _UpperCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a) _UpperCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a) return inputs_dict def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = AlbertModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCamelCase = type self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AlbertModel.from_pretrained(__a) self.assertIsNotNone(__a) @require_torch class _UpperCAmelCase( unittest.TestCase ): @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertModel.from_pretrained('''albert-base-v2''') _UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]]) _UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): _UpperCamelCase = model(__a , attention_mask=__a)[0] _UpperCamelCase = torch.Size((1, 11, 7_68)) self.assertEqual(output.shape , __a) 
_UpperCamelCase = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
78
0
import logging

from transformers import PretrainedConfig


logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
714
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = np.inf def set_batch_size(__snake_case ) -> None: nonlocal batch_size if isinstance(__snake_case, __snake_case ): _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__snake_case, __snake_case ): _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__snake_case, __snake_case ) and feature.dtype == "binary": _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__snake_case, __snake_case ) return None if batch_size is np.inf else batch_size class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> Dict: '''simple docstring''' super().__init__( __a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , ) _UpperCamelCase = path_or_paths if isinstance(__a , __a) else {self.split: path_or_paths} _UpperCamelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCamelCase = Parquet( cache_dir=__a , data_files=__a , features=__a , hash=__a , **__a , ) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__a , in_memory=self.keep_in_memory) return dataset class _UpperCAmelCase: def __init__( self , __a , __a , __a = None , **__a , ) -> Dict: '''simple docstring''' _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size or get_writer_batch_size(dataset.features) _UpperCamelCase = parquet_writer_kwargs def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: _UpperCamelCase = self._write(file_obj=__a , batch_size=__a , **self.parquet_writer_kwargs) else: _UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__a , **self.parquet_writer_kwargs) return written def UpperCAmelCase ( self , __a , __a , **__a) -> int: '''simple docstring''' _UpperCamelCase = 0 _UpperCamelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __a) _UpperCamelCase = self.dataset.features.arrow_schema _UpperCamelCase = pq.ParquetWriter(__a , schema=__a , **__a) for offset in logging.tqdm( range(0 , len(self.dataset) , __a) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating 
parquet from Arrow format''' , ): _UpperCamelCase = query_table( table=self.dataset._data , key=slice(__a , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__a) written += batch.nbytes writer.close() return written
78
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = { """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""], """tokenization_luke""": ["""LukeTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""", """LukeForEntityClassification""", """LukeForEntityPairClassification""", """LukeForEntitySpanClassification""", """LukeForMultipleChoice""", """LukeForQuestionAnswering""", """LukeForSequenceClassification""", """LukeForTokenClassification""", """LukeForMaskedLM""", """LukeModel""", """LukePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
715
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=None , __a=True , ) -> int: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 20} _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_flip_channel_order def UpperCAmelCase ( self) -> str: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ): lowercase__ = MobileViTImageProcessor if is_vision_available() else None def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = MobileViTImageProcessingTester(self) @property def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , '''do_resize''')) self.assertTrue(hasattr(__a , '''size''')) self.assertTrue(hasattr(__a , '''do_center_crop''')) self.assertTrue(hasattr(__a , '''center_crop''')) self.assertTrue(hasattr(__a , '''do_flip_channel_order''')) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 20}) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {'''shortest_edge''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' pass def UpperCAmelCase ( self) -> str: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , 
return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase ( self) -> int: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
78
0
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCamelCase__ ( __snake_case, __snake_case=0.999, __snake_case="cosine", ) -> Tuple: """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(__snake_case ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__snake_case ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) _UpperCamelCase = [] for i in range(__snake_case ): _UpperCamelCase = i / num_diffusion_timesteps _UpperCamelCase = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__snake_case ) / alpha_bar_fn(__snake_case ), __snake_case ) ) return torch.tensor(__snake_case, dtype=torch.floataa ) class _UpperCAmelCase( lowerCamelCase , lowerCamelCase ): lowercase__ = [e.name for e in KarrasDiffusionSchedulers] lowercase__ = 2 @register_to_config def __init__( self , __a = 10_00 , __a = 0.0_0085 , __a = 0.012 , __a = "linear" , __a = None , __a = "epsilon" , __a = False , __a = False , __a = 1.0 , __a = "linspace" , __a = 0 , ) -> Optional[int]: '''simple docstring''' if trained_betas is not None: _UpperCamelCase = torch.tensor(__a , dtype=torch.floataa) elif beta_schedule == "linear": _UpperCamelCase = torch.linspace(__a , __a , __a , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _UpperCamelCase = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _UpperCamelCase = betas_for_alpha_bar(__a , alpha_transform_type='''cosine''') elif beta_schedule == "exp": _UpperCamelCase = betas_for_alpha_bar(__a , alpha_transform_type='''exp''') else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''') _UpperCamelCase = 1.0 - self.betas _UpperCamelCase = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(__a , __a , __a) _UpperCamelCase = use_karras_sigmas def UpperCAmelCase ( self , __a , __a=None) -> List[Any]: '''simple docstring''' if schedule_timesteps is None: _UpperCamelCase = self.timesteps _UpperCamelCase = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter) == 0: _UpperCamelCase = 1 if len(__a) > 1 else 0 else: _UpperCamelCase = timestep.cpu().item() if torch.is_tensor(__a) else timestep _UpperCamelCase = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCAmelCase ( self) -> Dict: '''simple docstring''' # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCAmelCase ( self , __a , __a , ) -> torch.FloatTensor: '''simple docstring''' _UpperCamelCase = self.index_for_timestep(__a) _UpperCamelCase = self.sigmas[step_index] _UpperCamelCase = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCAmelCase ( self , __a , __a = None , __a = None , ) -> List[Any]: '''simple docstring''' _UpperCamelCase = num_inference_steps _UpperCamelCase = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _UpperCamelCase = np.linspace(0 , num_train_timesteps - 1 , __a , dtype=__a)[::-1].copy() elif self.config.timestep_spacing == "leading": _UpperCamelCase = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _UpperCamelCase = (np.arange(0 , __a) * step_ratio).round()[::-1].copy().astype(__a) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _UpperCamelCase = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _UpperCamelCase = (np.arange(__a , 0 , -step_ratio)).round().copy().astype(__a) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''') _UpperCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) _UpperCamelCase = np.log(__a) _UpperCamelCase = np.interp(__a , np.arange(0 , len(__a)) , __a) if self.config.use_karras_sigmas: _UpperCamelCase = self._convert_to_karras(in_sigmas=__a , num_inference_steps=self.num_inference_steps) _UpperCamelCase = np.array([self._sigma_to_t(__a , __a) for sigma in sigmas]) _UpperCamelCase = np.concatenate([sigmas, [0.0]]).astype(np.floataa) _UpperCamelCase = torch.from_numpy(__a).to(device=__a) _UpperCamelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]]) _UpperCamelCase = torch.from_numpy(__a) _UpperCamelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)]) if str(__a).startswith('''mps'''): # mps does not support float64 _UpperCamelCase = timesteps.to(__a , dtype=torch.floataa) else: _UpperCamelCase = timesteps.to(device=__a) # empty dt and derivative _UpperCamelCase = None _UpperCamelCase = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _UpperCamelCase = defaultdict(__a) def UpperCAmelCase ( self , __a , __a) -> List[Any]: '''simple docstring''' _UpperCamelCase = np.log(__a) # get distribution _UpperCamelCase = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range _UpperCamelCase = np.cumsum((dists >= 0) , axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) _UpperCamelCase = low_idx + 1 _UpperCamelCase = log_sigmas[low_idx] _UpperCamelCase = log_sigmas[high_idx] # interpolate sigmas _UpperCamelCase = (low - log_sigma) / (low - high) _UpperCamelCase = np.clip(__a , 0 , 1) # transform interpolation to time range _UpperCamelCase = (1 - w) * low_idx + w * high_idx _UpperCamelCase = t.reshape(sigma.shape) return t def UpperCAmelCase ( self , __a , __a) -> torch.FloatTensor: '''simple docstring''' _UpperCamelCase = in_sigmas[-1].item() _UpperCamelCase = in_sigmas[0].item() _UpperCamelCase = 7.0 # 7.0 is the value used in the paper _UpperCamelCase = np.linspace(0 , 1 , __a) _UpperCamelCase = sigma_min ** (1 / rho) _UpperCamelCase = sigma_max ** (1 / rho) _UpperCamelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return self.dt is None def UpperCAmelCase ( self , __a , __a , __a , __a = True , ) -> Union[SchedulerOutput, Tuple]: '''simple docstring''' _UpperCamelCase = self.index_for_timestep(__a) # advance index counter by 1 _UpperCamelCase = timestep.cpu().item() if torch.is_tensor(__a) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _UpperCamelCase = self.sigmas[step_index] _UpperCamelCase = self.sigmas[step_index + 1] else: # 2nd order / Heun's method _UpperCamelCase = self.sigmas[step_index - 1] _UpperCamelCase = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _UpperCamelCase = 0 _UpperCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _UpperCamelCase = sigma_hat if self.state_in_first_order else sigma_next _UpperCamelCase = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _UpperCamelCase = sigma_hat if self.state_in_first_order else sigma_next _UpperCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": _UpperCamelCase = model_output else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''') if self.config.clip_sample: _UpperCamelCase = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _UpperCamelCase = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _UpperCamelCase = sigma_next - sigma_hat # store for 2nd order step _UpperCamelCase = derivative _UpperCamelCase = dt _UpperCamelCase = sample else: # 2. 2nd order / Heun's method _UpperCamelCase = (sample - pred_original_sample) / sigma_next _UpperCamelCase = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample _UpperCamelCase = self.dt _UpperCamelCase = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__a) def UpperCAmelCase ( self , __a , __a , __a , ) -> torch.FloatTensor: '''simple docstring''' _UpperCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(__a): # mps does not support float64 _UpperCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa) _UpperCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa) else: _UpperCamelCase = self.timesteps.to(original_samples.device) _UpperCamelCase = timesteps.to(original_samples.device) _UpperCamelCase = [self.index_for_timestep(__a , __a) for t in timesteps] _UpperCamelCase = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): _UpperCamelCase = sigma.unsqueeze(-1) _UpperCamelCase = original_samples + noise * sigma return noisy_samples def __len__( self) -> int: '''simple docstring''' return self.config.num_train_timesteps
716
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ['image_processor', 'tokenizer'] lowercase__ = 'OwlViTImageProcessor' lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self , __a=None , __a=None , **__a) -> List[Any]: '''simple docstring''' _UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __a , ) _UpperCamelCase = kwargs.pop('''feature_extractor''') _UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(__a , __a) def __call__( self , __a=None , __a=None , __a=None , __a="max_length" , __a="np" , **__a) -> List[str]: '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''') if text is not None: if isinstance(__a , __a) or (isinstance(__a , __a) and not isinstance(text[0] , __a)): _UpperCamelCase = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a)] elif isinstance(__a , __a) and isinstance(text[0] , __a): _UpperCamelCase = [] # Maximum number of queries across batch _UpperCamelCase = max([len(__a) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__a) != max_num_queries: _UpperCamelCase = t + [''' '''] * (max_num_queries - len(__a)) _UpperCamelCase = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a) encodings.append(__a) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''') if return_tensors == "np": _UpperCamelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _UpperCamelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _UpperCamelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0) _UpperCamelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _UpperCamelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0) else: raise ValueError('''Target return tensor type could not be returned''') _UpperCamelCase = BatchEncoding() _UpperCamelCase = input_ids _UpperCamelCase = attention_mask if query_images is not None: _UpperCamelCase = BatchEncoding() _UpperCamelCase = self.image_processor( __a , return_tensors=__a , **__a).pixel_values _UpperCamelCase = query_pixel_values if images is not None: _UpperCamelCase = self.image_processor(__a , 
return_tensors=__a , **__a) if text is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__a) , tensor_type=__a) def UpperCAmelCase ( self , *__a , **__a) -> str: '''simple docstring''' return self.image_processor.post_process(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Dict: '''simple docstring''' return self.image_processor.post_process_object_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[Any]: '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[int]: '''simple docstring''' return self.tokenizer.batch_decode(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*__a , **__a) @property def UpperCAmelCase ( self) -> str: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , ) return self.image_processor_class @property def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , ) return self.image_processor
78
0
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class _UpperCAmelCase: def __init__( self , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = data _UpperCamelCase = [0X67452301, 0XEFCDAB89, 0X98BADCFE, 0X10325476, 0XC3D2E1F0] @staticmethod def UpperCAmelCase ( __a , __a) -> Tuple: '''simple docstring''' return ((n << b) | (n >> (32 - b))) & 0XFFFFFFFF def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = B'''\x80''' + B'''\x00''' * (63 - (len(self.data) + 8) % 64) _UpperCamelCase = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data)) return padded_data def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64) ] def UpperCAmelCase ( self , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = list(struct.unpack('''>16L''' , __a)) + [0] * 64 for i in range(16 , 80): _UpperCamelCase = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1) return w def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.padding() _UpperCamelCase = self.split_blocks() for block in self.blocks: _UpperCamelCase = self.expand_block(__a) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.h for i in range(0 , 80): if 0 <= i < 20: _UpperCamelCase = (b & c) | ((~b) & d) _UpperCamelCase = 0X5A827999 elif 20 <= i < 40: _UpperCamelCase = b ^ c ^ d _UpperCamelCase = 0X6ED9EBA1 elif 40 <= i < 60: _UpperCamelCase = (b & c) | (b & d) | (c & d) _UpperCamelCase = 0X8F1BBCDC elif 60 <= i < 80: _UpperCamelCase = b ^ c ^ d _UpperCamelCase = 0XCA62C1D6 _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = ( self.rotate(__a , 5) + f + e + k + expanded_block[i] & 0XFFFFFFFF, a, self.rotate(__a , 30), c, d, ) _UpperCamelCase = ( self.h[0] + a & 0XFFFFFFFF, self.h[1] + b & 0XFFFFFFFF, self.h[2] + c & 0XFFFFFFFF, self.h[3] + d & 0XFFFFFFFF, self.h[4] + e & 0XFFFFFFFF, ) return ("{:08x}" * 5).format(*self.h) def lowerCamelCase__ ( ): """simple docstring""" _UpperCamelCase = b'''Test String''' assert SHAaHash(__snake_case ).final_hash() == hashlib.shaa(__snake_case ).hexdigest() # noqa: S324 def lowerCamelCase__ ( ): """simple docstring""" _UpperCamelCase = argparse.ArgumentParser(description='''Process some strings or files''' ) parser.add_argument( '''--string''', dest='''input_string''', default='''Hello World!! Welcome to Cryptography''', help='''Hash the string''', ) parser.add_argument('''--file''', dest='''input_file''', help='''Hash contents of a file''' ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file, '''rb''' ) as f: _UpperCamelCase = f.read() else: _UpperCamelCase = bytes(__snake_case, '''utf-8''' ) print(SHAaHash(__snake_case ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
717
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _a = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["""PerceiverFeatureExtractor"""] _a = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
0
"""simple docstring""" from __future__ import annotations def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> None: """simple docstring""" if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): _UpperCamelCase , _UpperCamelCase = array[indexa], array[indexa] def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> None: """simple docstring""" if length > 1: _UpperCamelCase = int(length / 2 ) for i in range(__snake_case, low + middle ): comp_and_swap(__snake_case, __snake_case, i + middle, __snake_case ) bitonic_merge(__snake_case, __snake_case, __snake_case, __snake_case ) bitonic_merge(__snake_case, low + middle, __snake_case, __snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> None: """simple docstring""" if length > 1: _UpperCamelCase = int(length / 2 ) bitonic_sort(__snake_case, __snake_case, __snake_case, 1 ) bitonic_sort(__snake_case, low + middle, __snake_case, 0 ) bitonic_merge(__snake_case, __snake_case, __snake_case, __snake_case ) if __name__ == "__main__": _a = input("""Enter numbers separated by a comma:\n""").strip() _a = [int(item.strip()) for item in user_input.split(""",""")] bitonic_sort(unsorted, 0, len(unsorted), 1) print("""\nSorted array in ascending order is: """, end="""""") print(*unsorted, sep=""", """) bitonic_merge(unsorted, 0, len(unsorted), 0) print("""Sorted array in descending order is: """, end="""""") print(*unsorted, sep=""", """)
718
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=2 , __a=24 , __a=16 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=None , __a=2 , __a=2 , ) -> List[str]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = patch_size _UpperCamelCase = max_length _UpperCamelCase = num_mel_bins _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope _UpperCamelCase = frequency_stride _UpperCamelCase = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1 _UpperCamelCase = frequency_out_dimension * time_out_dimension _UpperCamelCase = num_patches + 2 def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = self.get_config() return config, input_values, labels def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ASTModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( 
_UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_values''': input_values} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowercase__ = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = ASTModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37) def UpperCAmelCase ( self) -> int: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) _UpperCamelCase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''input_values'''] self.assertListEqual(arg_names[:1] , __a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = ASTModel.from_pretrained(__a) self.assertIsNotNone(__a) def lowerCamelCase__ ( ) -> List[str]: """simple docstring""" _UpperCamelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' ) _UpperCamelCase , _UpperCamelCase = torchaudio.load(__snake_case ) return audio, sampling_rate @require_torch @require_torchaudio class _UpperCAmelCase( unittest.TestCase ): @cached_property def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''') if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.default_feature_extractor _UpperCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(__a) _UpperCamelCase = self.default_feature_extractor _UpperCamelCase , _UpperCamelCase = prepare_audio() _UpperCamelCase = audio.squeeze().numpy() _UpperCamelCase = feature_extractor(__a , sampling_rate=__a , return_tensors='''pt''').to(__a) # forward 
pass with torch.no_grad(): _UpperCamelCase = model(**__a) # verify the logits _UpperCamelCase = torch.Size((1, 5_27)) self.assertEqual(outputs.logits.shape , __a) _UpperCamelCase = torch.tensor([-0.8760, -7.0042, -8.6602]).to(__a) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
78
0
"""simple docstring""" from typing import Optional, Tuple, Union import torch from einops import rearrange, reduce from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput _a = 8 def lowerCamelCase__ ( __snake_case, __snake_case=BITS ) -> Tuple: """simple docstring""" _UpperCamelCase = x.device _UpperCamelCase = (x * 2_55).int().clamp(0, 2_55 ) _UpperCamelCase = 2 ** torch.arange(bits - 1, -1, -1, device=__snake_case ) _UpperCamelCase = rearrange(__snake_case, '''d -> d 1 1''' ) _UpperCamelCase = rearrange(__snake_case, '''b c h w -> b c 1 h w''' ) _UpperCamelCase = ((x & mask) != 0).float() _UpperCamelCase = rearrange(__snake_case, '''b c d h w -> b (c d) h w''' ) _UpperCamelCase = bits * 2 - 1 return bits def lowerCamelCase__ ( __snake_case, __snake_case=BITS ) -> Tuple: """simple docstring""" _UpperCamelCase = x.device _UpperCamelCase = (x > 0).int() _UpperCamelCase = 2 ** torch.arange(bits - 1, -1, -1, device=__snake_case, dtype=torch.intaa ) _UpperCamelCase = rearrange(__snake_case, '''d -> d 1 1''' ) _UpperCamelCase = rearrange(__snake_case, '''b (c d) h w -> b c d h w''', d=8 ) _UpperCamelCase = reduce(x * mask, '''b c d h w -> b c h w''', '''sum''' ) return (dec / 2_55).clamp(0.0, 1.0 ) def lowerCamelCase__ ( self, __snake_case, __snake_case, __snake_case, __snake_case = 0.0, __snake_case = True, __snake_case=None, __snake_case = True, ) -> Union[DDIMSchedulerOutput, Tuple]: """simple docstring""" if self.num_inference_steps is None: raise ValueError( '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' ) # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf # Ideally, read DDIM paper in-detail understanding # Notation (<variable name> -> <name in paper> # - pred_noise_t -> e_theta(x_t, t) # - pred_original_sample -> f_theta(x_t, t) or x_0 # - std_dev_t -> sigma_t # - eta -> η # - pred_sample_direction -> "direction pointing to x_t" # - pred_prev_sample -> "x_t-1" # 1. get previous step value (=t-1) _UpperCamelCase = timestep - self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas _UpperCamelCase = self.alphas_cumprod[timestep] _UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod _UpperCamelCase = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 # 4. Clip "predicted x_0" _UpperCamelCase = self.bit_scale if self.config.clip_sample: _UpperCamelCase = torch.clamp(__snake_case, -scale, __snake_case ) # 5. compute variance: "sigma_t(η)" -> see formula (16) # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1) _UpperCamelCase = self._get_variance(__snake_case, __snake_case ) _UpperCamelCase = eta * variance ** 0.5 if use_clipped_model_output: # the model_output is always re-derived from the clipped x_0 in Glide _UpperCamelCase = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCamelCase = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output # 7. 
compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _UpperCamelCase = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if eta > 0: # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072 _UpperCamelCase = model_output.device if torch.is_tensor(__snake_case ) else '''cpu''' _UpperCamelCase = torch.randn(model_output.shape, dtype=model_output.dtype, generator=__snake_case ).to(__snake_case ) _UpperCamelCase = self._get_variance(__snake_case, __snake_case ) ** 0.5 * eta * noise _UpperCamelCase = prev_sample + variance if not return_dict: return (prev_sample,) return DDIMSchedulerOutput(prev_sample=__snake_case, pred_original_sample=__snake_case ) def lowerCamelCase__ ( self, __snake_case, __snake_case, __snake_case, __snake_case="epsilon", __snake_case=None, __snake_case = True, ) -> Union[DDPMSchedulerOutput, Tuple]: """simple docstring""" _UpperCamelCase = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]: _UpperCamelCase , _UpperCamelCase = torch.split(__snake_case, sample.shape[1], dim=1 ) else: _UpperCamelCase = None # 1. compute alphas, betas _UpperCamelCase = self.alphas_cumprod[t] _UpperCamelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one _UpperCamelCase = 1 - alpha_prod_t _UpperCamelCase = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if prediction_type == "epsilon": _UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif prediction_type == "sample": _UpperCamelCase = model_output else: raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' ) # 3. Clip "predicted x_0" _UpperCamelCase = self.bit_scale if self.config.clip_sample: _UpperCamelCase = torch.clamp(__snake_case, -scale, __snake_case ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCamelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t _UpperCamelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise _UpperCamelCase = 0 if t > 0: _UpperCamelCase = torch.randn( model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=__snake_case ).to(model_output.device ) _UpperCamelCase = (self._get_variance(__snake_case, predicted_variance=__snake_case ) ** 0.5) * noise _UpperCamelCase = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return DDPMSchedulerOutput(prev_sample=__snake_case, pred_original_sample=__snake_case ) class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a , __a = 1.0 , ) -> str: '''simple docstring''' super().__init__() _UpperCamelCase = bit_scale _UpperCamelCase = ( ddim_bit_scheduler_step if isinstance(__a , __a) else ddpm_bit_scheduler_step ) self.register_modules(unet=__a , scheduler=__a) @torch.no_grad() def __call__( self , __a = 2_56 , __a = 2_56 , __a = 50 , __a = None , __a = 1 , __a = "pil" , __a = True , **__a , ) -> Union[Tuple, ImagePipelineOutput]: '''simple docstring''' _UpperCamelCase = torch.randn( (batch_size, self.unet.config.in_channels, height, width) , generator=__a , ) _UpperCamelCase = decimal_to_bits(__a) * self.bit_scale _UpperCamelCase = latents.to(self.device) self.scheduler.set_timesteps(__a) for t in self.progress_bar(self.scheduler.timesteps): # predict the noise residual _UpperCamelCase = self.unet(__a , __a).sample # compute the previous noisy sample x_t -> x_t-1 _UpperCamelCase = self.scheduler.step(__a , __a , __a).prev_sample _UpperCamelCase = bits_to_decimal(__a) if output_type == "pil": _UpperCamelCase = self.numpy_to_pil(__a) if not return_dict: return (image,) return ImagePipelineOutput(images=__a)
719
"""simple docstring""" def lowerCamelCase__ ( ) -> list[list[int]]: """simple docstring""" return [list(range(10_00 - i, -10_00 - i, -1 ) ) for i in range(10_00 )] _a = generate_large_matrix() _a = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def lowerCamelCase__ ( __snake_case ) -> None: """simple docstring""" assert all(row == sorted(__snake_case, reverse=__snake_case ) for row in grid ) assert all(list(__snake_case ) == sorted(__snake_case, reverse=__snake_case ) for col in zip(*__snake_case ) ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 _UpperCamelCase = len(__snake_case ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _UpperCamelCase = (left + right) // 2 _UpperCamelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _UpperCamelCase = mid + 1 else: _UpperCamelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__snake_case ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 _UpperCamelCase = len(grid[0] ) for i in range(len(__snake_case ) ): _UpperCamelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(__snake_case ) * len(grid[0] )) - total def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 for row in grid: for i, number in enumerate(__snake_case ): if number < 0: total += len(__snake_case ) - i break return total def lowerCamelCase__ ( ) -> None: """simple docstring""" from timeit import timeit print('''Running benchmarks''' ) _UpperCamelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _UpperCamelCase = timeit(F'''{func}(grid=grid)''', setup=__snake_case, number=5_00 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
78
0
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class _UpperCAmelCase( tf.keras.layers.Layer ): def __init__( self , __a , __a , __a = None , __a = None) -> List[str]: '''simple docstring''' super().__init__() _UpperCamelCase = pad_token_id _UpperCamelCase = max_length _UpperCamelCase = vocab _UpperCamelCase = merges _UpperCamelCase = BytePairTokenizer(__a , __a , sequence_length=__a) @classmethod def UpperCAmelCase ( cls , __a , *__a , **__a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = [''' '''.join(__a) for m in tokenizer.bpe_ranks.keys()] _UpperCamelCase = tokenizer.get_vocab() return cls(__a , __a , *__a , **__a) @classmethod def UpperCAmelCase ( cls , __a , *__a , **__a) -> int: '''simple docstring''' _UpperCamelCase = GPTaTokenizer.from_pretrained(__a , *__a , **__a) return cls.from_tokenizer(__a , *__a , **__a) @classmethod def UpperCAmelCase ( cls , __a) -> str: '''simple docstring''' return cls(**__a) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def UpperCAmelCase ( self , __a , __a = None) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.tf_tokenizer(__a) _UpperCamelCase = tf.ones_like(__a) if self.pad_token_id is not None: # pad the tokens up to max length _UpperCamelCase = max_length if max_length is not None else self.max_length if max_length is not None: _UpperCamelCase , _UpperCamelCase = pad_model_inputs( __a , max_seq_length=__a , pad_value=self.pad_token_id) return {"attention_mask": attention_mask, "input_ids": input_ids}
720
"""simple docstring""" import copy import re class _UpperCAmelCase: lowercase__ = 'hp' lowercase__ = {} lowercase__ = None @classmethod def UpperCAmelCase ( cls , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = prefix _UpperCamelCase = defaults cls.build_naming_info() @staticmethod def UpperCAmelCase ( __a , __a) -> Union[str, Any]: '''simple docstring''' if len(__a) == 0: return "" _UpperCamelCase = None if any(char.isdigit() for char in word): raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a) + 1): _UpperCamelCase = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _UpperCamelCase = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a): _UpperCamelCase = '''''' while integer != 0: _UpperCamelCase = chr(ord('''A''') + integer % 10) + s integer //= 10 return s _UpperCamelCase = 0 while True: _UpperCamelCase = word + '''#''' + int_to_alphabetic(__a) if sword in info["reverse_short_word"]: continue else: _UpperCamelCase = sword break _UpperCamelCase = short_word _UpperCamelCase = word return short_word @staticmethod def UpperCAmelCase ( __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = param_name.split('''_''') _UpperCamelCase = [TrialShortNamer.shortname_for_word(__a , __a) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _UpperCamelCase = ['''''', '''_'''] for separator in separators: _UpperCamelCase = separator.join(__a) if shortname not in info["reverse_short_param"]: _UpperCamelCase = shortname _UpperCamelCase = param_name return shortname return param_name @staticmethod def UpperCAmelCase ( __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = TrialShortNamer.shortname_for_key(__a , __a) _UpperCamelCase = short_name _UpperCamelCase = param_name @classmethod def UpperCAmelCase ( cls) -> Any: '''simple docstring''' if cls.NAMING_INFO is not None: return _UpperCamelCase = { '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } _UpperCamelCase = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(__a , __a) _UpperCamelCase = info @classmethod def UpperCAmelCase ( cls , __a) -> Optional[Any]: '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None _UpperCamelCase = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'''You should provide a default value for the param name {k} with value {v}''') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _UpperCamelCase = cls.NAMING_INFO['''short_param'''][k] if isinstance(__a , __a): _UpperCamelCase = 1 if v else 0 _UpperCamelCase = '''''' if isinstance(__a , (int, float)) else '''-''' _UpperCamelCase = F'''{key}{sep}{v}''' name.append(__a) return "_".join(__a) @classmethod def UpperCAmelCase ( cls , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = repr[len(cls.PREFIX) + 1 :] if repr == "": _UpperCamelCase = [] else: _UpperCamelCase = repr.split('''_''') _UpperCamelCase = {} for value in values: if "-" in value: _UpperCamelCase , _UpperCamelCase = value.split('''-''') else: _UpperCamelCase = re.sub('''[0-9.]''' , '''''' , __a) _UpperCamelCase = float(re.sub('''[^0-9.]''' , '''''' , __a)) _UpperCamelCase = cls.NAMING_INFO['''reverse_short_param'''][p_k] 
_UpperCamelCase = p_v for k in cls.DEFAULTS: if k not in parameters: _UpperCamelCase = cls.DEFAULTS[k] return parameters
78
0
"""simple docstring""" from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = 10**-10 ) -> float: """simple docstring""" _UpperCamelCase = a while True: _UpperCamelCase = Decimal(__snake_case ) - ( Decimal(eval(__snake_case ) ) / Decimal(eval(str(diff(__snake_case ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__snake_case ) ) < precision: # noqa: S307 return float(__snake_case ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""") # Find Square Root of 5 print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""") # Exponential Roots print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
721
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) ) _UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) ) _UpperCamelCase = 0.01 with locka.acquire(): with pytest.raises(__snake_case ): _UpperCamelCase = time.time() locka.acquire(__snake_case ) assert time.time() - _start > timeout def lowerCamelCase__ ( __snake_case ) -> List[Any]: """simple docstring""" _UpperCamelCase = '''a''' * 10_00 + '''.lock''' _UpperCamelCase = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(__snake_case ) assert len(os.path.basename(locka._lock_file ) ) <= 2_55 _UpperCamelCase = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__snake_case ): locka.acquire(0 )
78
0
"""simple docstring""" import unittest from transformers.testing_utils import CaptureStdout from transformers.tools.python_interpreter import evaluate def lowerCamelCase__ ( __snake_case ): """simple docstring""" return x + 2 class _UpperCAmelCase( unittest.TestCase ): def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = '''x = 3''' _UpperCamelCase = {} _UpperCamelCase = evaluate(__a , {} , state=__a) assert result == 3 self.assertDictEqual(__a , {'''x''': 3}) _UpperCamelCase = '''x = y''' _UpperCamelCase = {'''y''': 5} _UpperCamelCase = evaluate(__a , {} , state=__a) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__a , {'''x''': 5, '''y''': 5}) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = '''y = add_two(x)''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {'''add_two''': add_two} , state=__a) assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''y''': 5}) # Won't work without the tool with CaptureStdout() as out: _UpperCamelCase = evaluate(__a , {} , state=__a) assert result is None assert "tried to execute add_two" in out.out def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = '''x = 3''' _UpperCamelCase = {} _UpperCamelCase = evaluate(__a , {} , state=__a) assert result == 3 self.assertDictEqual(__a , {'''x''': 3}) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = '''test_dict = {\'x\': x, \'y\': add_two(x)}''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {'''add_two''': add_two} , state=__a) self.assertDictEqual(__a , {'''x''': 3, '''y''': 5}) self.assertDictEqual(__a , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}}) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = '''x = 3\ny = 5''' _UpperCamelCase = {} _UpperCamelCase = evaluate(__a , {} , state=__a) # evaluate returns the value of the last assignment. assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''y''': 5}) def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = '''text = f\'This is x: {x}.\'''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {} , state=__a) # evaluate returns the value of the last assignment. assert result == "This is x: 3." self.assertDictEqual(__a , {'''x''': 3, '''text''': '''This is x: 3.'''}) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = '''if x <= 3:\n y = 2\nelse:\n y = 5''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {} , state=__a) # evaluate returns the value of the last assignment. assert result == 2 self.assertDictEqual(__a , {'''x''': 3, '''y''': 2}) _UpperCamelCase = {'''x''': 8} _UpperCamelCase = evaluate(__a , {} , state=__a) # evaluate returns the value of the last assignment. 
assert result == 5 self.assertDictEqual(__a , {'''x''': 8, '''y''': 5}) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = '''test_list = [x, add_two(x)]''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {'''add_two''': add_two} , state=__a) self.assertListEqual(__a , [3, 5]) self.assertDictEqual(__a , {'''x''': 3, '''test_list''': [3, 5]}) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = '''y = x''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {} , state=__a) assert result == 3 self.assertDictEqual(__a , {'''x''': 3, '''y''': 3}) def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = '''test_list = [x, add_two(x)]\ntest_list[1]''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {'''add_two''': add_two} , state=__a) assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''test_list''': [3, 5]}) _UpperCamelCase = '''test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']''' _UpperCamelCase = {'''x''': 3} _UpperCamelCase = evaluate(__a , {'''add_two''': add_two} , state=__a) assert result == 5 self.assertDictEqual(__a , {'''x''': 3, '''test_dict''': {'''x''': 3, '''y''': 5}}) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = '''x = 0\nfor i in range(3):\n x = i''' _UpperCamelCase = {} _UpperCamelCase = evaluate(__a , {'''range''': range} , state=__a) assert result == 2 self.assertDictEqual(__a , {'''x''': 2, '''i''': 2})
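# --- Illustrative sketch: a tiny restricted evaluator built on `ast` ---
# A minimal, self-contained version of the idea the tests above exercise: run a
# small subset of Python (assignments, calls to whitelisted tools) against an
# explicit `state` dict instead of the real interpreter. Names are hypothetical.
import ast
import operator

_BIN_OPS = {ast.Add: operator.add, ast.Sub: operator.sub, ast.Mult: operator.mul}


def eval_node(node, tools, state):
    if isinstance(node, ast.Constant):
        return node.value
    if isinstance(node, ast.Name):
        return state[node.id]
    if isinstance(node, ast.BinOp):
        return _BIN_OPS[type(node.op)](eval_node(node.left, tools, state), eval_node(node.right, tools, state))
    if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
        fn = tools[node.func.id]  # only whitelisted callables are reachable
        return fn(*[eval_node(a, tools, state) for a in node.args])
    raise ValueError(f"unsupported syntax: {ast.dump(node)}")


def evaluate_snippet(code: str, tools: dict, state: dict):
    result = None
    for stmt in ast.parse(code).body:
        if isinstance(stmt, ast.Assign):
            result = eval_node(stmt.value, tools, state)
            state[stmt.targets[0].id] = result
        elif isinstance(stmt, ast.Expr):
            result = eval_node(stmt.value, tools, state)
    return result


if __name__ == "__main__":
    state = {}
    print(evaluate_snippet("x = 3\ny = add_two(x)", {"add_two": lambda v: v + 2}, state))  # 5
    print(state)  # {'x': 3, 'y': 5}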
700
"""simple docstring""" from math import sqrt def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' must been an int and positive" _UpperCamelCase = True # 0 and 1 are none primes. if number <= 1: _UpperCamelCase = False for divisor in range(2, int(round(sqrt(__snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: _UpperCamelCase = False break # precondition assert isinstance(__snake_case, __snake_case ), "'status' must been from type bool" return status def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N _UpperCamelCase = list(range(2, n + 1 ) ) _UpperCamelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(__snake_case ) ): for j in range(i + 1, len(__snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): _UpperCamelCase = 0 # filters actual prime numbers. _UpperCamelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ) -> List[str]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2" _UpperCamelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1 ): if is_prime(__snake_case ): ans.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and number >= 0, "'number' must been an int and >= 0" _UpperCamelCase = [] # this list will be returns of the function. # potential prime number factors. 
_UpperCamelCase = 2 _UpperCamelCase = number if number == 0 or number == 1: ans.append(__snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(__snake_case ): while quotient != 1: if is_prime(__snake_case ) and (quotient % factor == 0): ans.append(__snake_case ) quotient /= factor else: factor += 1 else: ans.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase = 0 # prime factorization of 'number' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = max(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int" return ans def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase = 0 # prime factorization of 'number' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = min(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int" return ans def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0, __snake_case ), "compare bust been from type bool" return number % 2 == 0 def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0, __snake_case ), "compare bust been from type bool" return number % 2 != 0 def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and (number > 2) and is_even(__snake_case ) ), "'number' must been an int, even and > 2" _UpperCamelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' _UpperCamelCase = get_prime_numbers(__snake_case ) _UpperCamelCase = len(__snake_case ) # run variable for while-loops. _UpperCamelCase = 0 _UpperCamelCase = None # exit variable. for break up the loops _UpperCamelCase = True while i < len_pn and loop: _UpperCamelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: _UpperCamelCase = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(__snake_case, __snake_case ) and (len(__snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def lowerCamelCase__ ( __snake_case, __snake_case ) -> str: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
_UpperCamelCase = 0 while numbera != 0: _UpperCamelCase = numbera % numbera _UpperCamelCase = numbera _UpperCamelCase = rest # precondition assert isinstance(__snake_case, __snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." _UpperCamelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = prime_factorization(__snake_case ) elif numbera == 1 or numbera == 1: _UpperCamelCase = [] _UpperCamelCase = [] _UpperCamelCase = max(__snake_case, __snake_case ) _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: _UpperCamelCase = prime_fac_a.count(__snake_case ) _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(max(__snake_case, __snake_case ) ): ans *= n else: _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCamelCase__ ( __snake_case ) -> Tuple: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'number' must been a positive int" _UpperCamelCase = 0 _UpperCamelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(__snake_case ): ans += 1 # precondition assert isinstance(__snake_case, __snake_case ) and is_prime( __snake_case ), "'ans' must been a prime number and from type int" return ans def lowerCamelCase__ ( __snake_case, __snake_case ) -> Tuple: """simple docstring""" assert ( is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" _UpperCamelCase = p_number_a + 1 # jump to the next number _UpperCamelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(__snake_case ): number += 1 while number < p_number_a: ans.append(__snake_case ) number += 1 # fetch the next prime number. while not is_prime(__snake_case ): number += 1 # precondition assert ( isinstance(__snake_case, __snake_case ) and ans[0] != p_number_a and ans[len(__snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCamelCase__ ( __snake_case ) -> List[Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 1), "'n' must been int and >= 1" _UpperCamelCase = [] # will be returned. 
for divisor in range(1, n + 1 ): if n % divisor == 0: ans.append(__snake_case ) # precondition assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" _UpperCamelCase = get_divisors(__snake_case ) # precondition assert ( isinstance(__snake_case, __snake_case ) and (divisors[0] == 1) and (divisors[len(__snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. _UpperCamelCase = gcd(abs(__snake_case ), abs(__snake_case ) ) # precondition assert ( isinstance(__snake_case, __snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been a int and >= 0" _UpperCamelCase = 1 # this will be return. for factor in range(1, n + 1 ): ans *= factor return ans def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been an int and >= 0" _UpperCamelCase = 0 _UpperCamelCase = 1 _UpperCamelCase = 1 # this will be return for _ in range(n - 1 ): _UpperCamelCase = ans ans += fiba _UpperCamelCase = tmp return ans
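# --- Illustrative sketch: cross-checking factorization, gcd and lcm ---
# A small, self-contained companion to the helpers above: trial-division
# factorization plus a gcd/lcm consistency check. Standard library only; the
# sample numbers are arbitrary.
from math import gcd, prod


def factorize(n: int) -> list[int]:
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    n = 924
    factors = factorize(n)
    assert prod(factors) == n
    print(factors)  # [2, 2, 3, 7, 11]

    a, b = 924, 490
    lcm = a * b // gcd(a, b)
    assert lcm % a == 0 and lcm % b == 0
    print(gcd(a, b), lcm)  # 14 32340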
78
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
701
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings _a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class _UpperCAmelCase( lowerCamelCase ): lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Whether to use SortishSampler or not.'} ) lowercase__ = field( default=lowerCamelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) lowercase__ = field( default=lowerCamelCase , metadata={ 'help': ( 'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `max_length` value of the model configuration.' ) } , ) lowercase__ = field( default=lowerCamelCase , metadata={ 'help': ( 'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `num_beams` value of the model configuration.' ) } , ) lowercase__ = field( default=lowerCamelCase , metadata={ 'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.' } , ) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = super().to_dict() for k, v in d.items(): if isinstance(__a , __a): _UpperCamelCase = v.to_dict() return d
78
0
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def lowerCamelCase__ ( __snake_case ) -> List[Any]: """simple docstring""" if "img_encoder.pos_embed" in name: _UpperCamelCase = name.replace('''img_encoder.pos_embed''', '''vision_model.embeddings.position_embeddings''' ) if "img_encoder.patch_embed.proj" in name: _UpperCamelCase = name.replace('''img_encoder.patch_embed.proj''', '''vision_model.embeddings.patch_embeddings.projection''' ) if "img_encoder.patch_embed.norm" in name: _UpperCamelCase = name.replace('''img_encoder.patch_embed.norm''', '''vision_model.embeddings.layernorm''' ) if "img_encoder.layers" in name: _UpperCamelCase = name.replace('''img_encoder.layers''', '''vision_model.encoder.stages''' ) if "blocks" in name and "res" not in name: _UpperCamelCase = name.replace('''blocks''', '''layers''' ) if "attn" in name and "pre_assign" not in name: _UpperCamelCase = name.replace('''attn''', '''self_attn''' ) if "proj" in name and "self_attn" in name and "text" not in name: _UpperCamelCase = name.replace('''proj''', '''out_proj''' ) if "pre_assign_attn.attn.proj" in name: _UpperCamelCase = name.replace('''pre_assign_attn.attn.proj''', '''pre_assign_attn.attn.out_proj''' ) if "norm1" in name: _UpperCamelCase = name.replace('''norm1''', '''layer_norm1''' ) if "norm2" in name and "pre_assign" not in name: _UpperCamelCase = name.replace('''norm2''', '''layer_norm2''' ) if "img_encoder.norm" in name: _UpperCamelCase = name.replace('''img_encoder.norm''', '''vision_model.layernorm''' ) # text encoder if "text_encoder.token_embedding" in name: _UpperCamelCase = name.replace('''text_encoder.token_embedding''', '''text_model.embeddings.token_embedding''' ) if "text_encoder.positional_embedding" in name: _UpperCamelCase = name.replace('''text_encoder.positional_embedding''', '''text_model.embeddings.position_embedding.weight''' ) if "text_encoder.transformer.resblocks." in name: _UpperCamelCase = name.replace('''text_encoder.transformer.resblocks.''', '''text_model.encoder.layers.''' ) if "ln_1" in name: _UpperCamelCase = name.replace('''ln_1''', '''layer_norm1''' ) if "ln_2" in name: _UpperCamelCase = name.replace('''ln_2''', '''layer_norm2''' ) if "c_fc" in name: _UpperCamelCase = name.replace('''c_fc''', '''fc1''' ) if "c_proj" in name: _UpperCamelCase = name.replace('''c_proj''', '''fc2''' ) if "text_encoder" in name: _UpperCamelCase = name.replace('''text_encoder''', '''text_model''' ) if "ln_final" in name: _UpperCamelCase = name.replace('''ln_final''', '''final_layer_norm''' ) # projection layers if "img_projector.linear_hidden." in name: _UpperCamelCase = name.replace('''img_projector.linear_hidden.''', '''visual_projection.''' ) if "img_projector.linear_out." 
in name: _UpperCamelCase = name.replace('''img_projector.linear_out.''', '''visual_projection.3.''' ) if "text_projector.linear_hidden" in name: _UpperCamelCase = name.replace('''text_projector.linear_hidden''', '''text_projection''' ) if "text_projector.linear_out" in name: _UpperCamelCase = name.replace('''text_projector.linear_out''', '''text_projection.3''' ) return name def lowerCamelCase__ ( __snake_case, __snake_case ) -> Tuple: """simple docstring""" for key in orig_state_dict.copy().keys(): _UpperCamelCase = orig_state_dict.pop(__snake_case ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _UpperCamelCase = key.split('''.''' ) _UpperCamelCase , _UpperCamelCase = int(key_split[2] ), int(key_split[4] ) _UpperCamelCase = config.vision_config.hidden_size if "weight" in key: _UpperCamelCase = val[:dim, :] _UpperCamelCase = val[dim : dim * 2, :] _UpperCamelCase = val[-dim:, :] else: _UpperCamelCase = val[:dim] _UpperCamelCase = val[dim : dim * 2] _UpperCamelCase = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _UpperCamelCase = key.split('''.''' ) _UpperCamelCase = int(key_split[3] ) _UpperCamelCase = config.text_config.hidden_size if "weight" in key: _UpperCamelCase = val[:dim, :] _UpperCamelCase = val[ dim : dim * 2, : ] _UpperCamelCase = val[-dim:, :] else: _UpperCamelCase = val[:dim] _UpperCamelCase = val[dim : dim * 2] _UpperCamelCase = val[-dim:] else: _UpperCamelCase = rename_key(__snake_case ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): _UpperCamelCase = val.squeeze_() else: _UpperCamelCase = val return orig_state_dict def lowerCamelCase__ ( ) -> int: """simple docstring""" _UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _UpperCamelCase = Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case="groupvit-gcc-yfcc", __snake_case=False ) -> Optional[int]: """simple docstring""" _UpperCamelCase = GroupViTConfig() _UpperCamelCase = GroupViTModel(__snake_case ).eval() _UpperCamelCase = torch.load(__snake_case, map_location='''cpu''' )['''model'''] _UpperCamelCase = convert_state_dict(__snake_case, __snake_case ) _UpperCamelCase , _UpperCamelCase = model.load_state_dict(__snake_case, strict=__snake_case ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__snake_case ) == 0) # verify result _UpperCamelCase = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) _UpperCamelCase = prepare_img() _UpperCamelCase = processor(text=['''a photo of a cat''', '''a photo of a dog'''], images=__snake_case, padding=__snake_case, return_tensors='''pt''' ) with torch.no_grad(): _UpperCamelCase = model(**__snake_case ) if model_name == "groupvit-gcc-yfcc": _UpperCamelCase = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": _UpperCamelCase = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image, __snake_case, atol=1e-3 ) 
processor.save_pretrained(__snake_case ) model.save_pretrained(__snake_case ) print('''Successfully saved processor and model to''', __snake_case ) if push_to_hub: print('''Pushing to the hub...''' ) processor.push_to_hub(__snake_case, organization='''nielsr''' ) model.push_to_hub(__snake_case, organization='''nielsr''' ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model.""" ) parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""") parser.add_argument( """--model_name""", default="""groupvit-gccy-fcc""", type=str, help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""", ) _a = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
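# --- Illustrative sketch: splitting a fused qkv projection into q / k / v ---
# The core trick of the state-dict conversion above, in isolation: a fused
# (3*dim, dim) in-projection weight is sliced into three (dim, dim) matrices.
# Uses torch only; the tensor names are made up for the example.
import torch

dim = 4
fused_weight = torch.randn(3 * dim, dim)  # e.g. a "qkv.weight" entry in the source checkpoint
fused_bias = torch.randn(3 * dim)

q_w, k_w, v_w = fused_weight[:dim, :], fused_weight[dim : dim * 2, :], fused_weight[-dim:, :]
q_b, k_b, v_b = fused_bias[:dim], fused_bias[dim : dim * 2], fused_bias[-dim:]

assert torch.equal(torch.cat([q_w, k_w, v_w], dim=0), fused_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b], dim=0), fused_bias)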
702
"""simple docstring""" import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) _a = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _UpperCamelCase = k.replace(__snake_case, __snake_case ) if k.startswith('''encoder''' ): _UpperCamelCase = k.replace('''.attn''', '''.self_attn''' ) _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''final_layer_norm''' ) elif k.startswith('''decoder''' ): _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''encoder_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm3''', '''final_layer_norm''' ) return k def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: _UpperCamelCase = sd.pop(__snake_case ) _UpperCamelCase = k.replace('''layernorm_embedding''', '''layer_norm''' ) assert new_k not in sd _UpperCamelCase = v _a = ["""START"""] @torch.no_grad() def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int: """simple docstring""" _UpperCamelCase = torch.load(__snake_case, map_location='''cpu''' ) _UpperCamelCase = model['''model'''] _UpperCamelCase = BlenderbotConfig.from_json_file(__snake_case ) _UpperCamelCase = BlenderbotForConditionalGeneration(__snake_case ) _UpperCamelCase = m.model.state_dict().keys() _UpperCamelCase = [] _UpperCamelCase = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _UpperCamelCase = rename_state_dict_key(__snake_case ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _UpperCamelCase = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__snake_case ) m.model.load_state_dict(__snake_case, strict=__snake_case ) m.half() m.save_pretrained(__snake_case ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) _a = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
78
0
"""simple docstring""" from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool _a = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", 
"""Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 'facebook/nllb-200-distilled-600M' lowercase__ = ( 'This is a tool that translates text from a language to 
another. It takes three inputs: `text`, which should ' 'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ' 'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in ' 'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.' ) lowercase__ = 'translator' lowercase__ = AutoTokenizer lowercase__ = AutoModelForSeqaSeqLM lowercase__ = LANGUAGE_CODES lowercase__ = ['text', 'text', 'text'] lowercase__ = ['text'] def UpperCAmelCase ( self , __a , __a , __a) -> str: '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''') if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''') _UpperCamelCase = self.lang_to_code[src_lang] _UpperCamelCase = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( __a , return_tensors='''pt''' , src_lang=__a , tgt_lang=__a) def UpperCAmelCase ( self , __a) -> List[str]: '''simple docstring''' return self.model.generate(**__a) def UpperCAmelCase ( self , __a) -> Union[str, Any]: '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__a)
703
"""simple docstring""" import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# _a = [ # (stable-diffusion, HF Diffusers) ("""time_embed.0.weight""", """time_embedding.linear_1.weight"""), ("""time_embed.0.bias""", """time_embedding.linear_1.bias"""), ("""time_embed.2.weight""", """time_embedding.linear_2.weight"""), ("""time_embed.2.bias""", """time_embedding.linear_2.bias"""), ("""input_blocks.0.0.weight""", """conv_in.weight"""), ("""input_blocks.0.0.bias""", """conv_in.bias"""), ("""out.0.weight""", """conv_norm_out.weight"""), ("""out.0.bias""", """conv_norm_out.bias"""), ("""out.2.weight""", """conv_out.weight"""), ("""out.2.bias""", """conv_out.bias"""), ] _a = [ # (stable-diffusion, HF Diffusers) ("""in_layers.0""", """norm1"""), ("""in_layers.2""", """conv1"""), ("""out_layers.0""", """norm2"""), ("""out_layers.3""", """conv2"""), ("""emb_layers.1""", """time_emb_proj"""), ("""skip_connection""", """conv_shortcut"""), ] _a = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks _a = F"""down_blocks.{i}.resnets.{j}.""" _a = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 _a = F"""down_blocks.{i}.attentions.{j}.""" _a = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks _a = F"""up_blocks.{i}.resnets.{j}.""" _a = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 _a = F"""up_blocks.{i}.attentions.{j}.""" _a = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 _a = F"""down_blocks.{i}.downsamplers.0.conv.""" _a = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 _a = F"""up_blocks.{i}.upsamplers.0.""" _a = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) _a = """mid_block.attentions.0.""" _a = """middle_block.1.""" unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): _a = F"""mid_block.resnets.{j}.""" _a = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" _UpperCamelCase = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: _UpperCamelCase = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v _UpperCamelCase = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# _a = [ # (stable-diffusion, HF Diffusers) ("""nin_shortcut""", """conv_shortcut"""), 
("""norm_out""", """conv_norm_out"""), ("""mid.attn_1.""", """mid_block.attentions.0."""), ] for i in range(4): # down_blocks have two resnets for j in range(2): _a = F"""encoder.down_blocks.{i}.resnets.{j}.""" _a = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: _a = F"""down_blocks.{i}.downsamplers.0.""" _a = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) _a = F"""up_blocks.{i}.upsamplers.0.""" _a = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): _a = F"""decoder.up_blocks.{i}.resnets.{j}.""" _a = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): _a = F"""mid_block.resnets.{i}.""" _a = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) _a = [ # (stable-diffusion, HF Diffusers) ("""norm.""", """group_norm."""), ("""q.""", """query."""), ("""k.""", """key."""), ("""v.""", """value."""), ("""proj_out.""", """proj_attn."""), ] def lowerCamelCase__ ( __snake_case ) -> List[str]: """simple docstring""" return w.reshape(*w.shape, 1, 1 ) def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v _UpperCamelCase = {v: vae_state_dict[k] for k, v in mapping.items()} _UpperCamelCase = ['''q''', '''k''', '''v''', '''proj_out'''] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'''mid.attn_1.{weight_name}.weight''' in k: print(F'''Reshaping {k} for SD format''' ) _UpperCamelCase = reshape_weight_for_sd(__snake_case ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# _a = [ # (stable-diffusion, HF Diffusers) ("""resblocks.""", """text_model.encoder.layers."""), ("""ln_1""", """layer_norm1"""), ("""ln_2""", """layer_norm2"""), (""".c_fc.""", """.fc1."""), (""".c_proj.""", """.fc2."""), (""".attn""", """.self_attn"""), ("""ln_final.""", """transformer.text_model.final_layer_norm."""), ("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""), ("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""), ] _a = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} _a = re.compile("""|""".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp _a = {"""q""": 0, """k""": 1, """v""": 2} def lowerCamelCase__ ( __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = {} _UpperCamelCase = {} for k, v in text_enc_dict.items(): if ( k.endswith('''.self_attn.q_proj.weight''' ) or k.endswith('''.self_attn.k_proj.weight''' ) or k.endswith('''.self_attn.v_proj.weight''' ) ): _UpperCamelCase = k[: -len('''.q_proj.weight''' )] _UpperCamelCase = k[-len('''q_proj.weight''' )] if k_pre not in capture_qkv_weight: _UpperCamelCase = [None, None, None] 
_UpperCamelCase = v continue if ( k.endswith('''.self_attn.q_proj.bias''' ) or k.endswith('''.self_attn.k_proj.bias''' ) or k.endswith('''.self_attn.v_proj.bias''' ) ): _UpperCamelCase = k[: -len('''.q_proj.bias''' )] _UpperCamelCase = k[-len('''q_proj.bias''' )] if k_pre not in capture_qkv_bias: _UpperCamelCase = [None, None, None] _UpperCamelCase = v continue _UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case ) _UpperCamelCase = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' ) _UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case ) _UpperCamelCase = torch.cat(__snake_case ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' ) _UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case ) _UpperCamelCase = torch.cat(__snake_case ) return new_state_dict def lowerCamelCase__ ( __snake_case ) -> Tuple: """simple docstring""" return text_enc_dict if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt.""" ) _a = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors _a = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""") _a = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""") _a = osp.join(args.model_path, """text_encoder""", """model.safetensors""") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): _a = load_file(unet_path, device="""cpu""") else: _a = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""") _a = torch.load(unet_path, map_location="""cpu""") if osp.exists(vae_path): _a = load_file(vae_path, device="""cpu""") else: _a = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""") _a = torch.load(vae_path, map_location="""cpu""") if osp.exists(text_enc_path): _a = load_file(text_enc_path, device="""cpu""") else: _a = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""") _a = torch.load(text_enc_path, map_location="""cpu""") # Convert the UNet model _a = convert_unet_state_dict(unet_state_dict) _a = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()} # Convert the VAE model _a = convert_vae_state_dict(vae_state_dict) _a = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper _a = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm _a = {"""transformer.""" + k: v for k, v in text_enc_dict.items()} _a = convert_text_enc_state_dict_vaa(text_enc_dict) _a = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()} else: _a = convert_text_enc_state_dict(text_enc_dict) _a = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint _a = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: _a = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: _a = {"""state_dict""": state_dict} torch.save(state_dict, args.checkpoint_path)
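# --- Illustrative sketch: renaming checkpoint keys with one combined regex ---
# The same technique as the textenc_pattern above: escape every source substring,
# join them into a single alternation, and substitute via a dictionary lookup in
# one pass. The rename table here is a small made-up subset.
import re

renames = {
    "ln_1": "layer_norm1",
    "ln_2": "layer_norm2",
    ".c_fc.": ".fc1.",
    ".c_proj.": ".fc2.",
}
protected = {re.escape(src): dst for src, dst in renames.items()}
pattern = re.compile("|".join(protected.keys()))

key = "resblocks.3.mlp.c_fc.weight"
new_key = pattern.sub(lambda m: protected[re.escape(m.group(0))], key)
print(new_key)  # resblocks.3.mlp.fc1.weight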
78
0
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar _a = TypeVar("""T""") class _UpperCAmelCase( Generic[T] ): def __init__( self , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = data _UpperCamelCase = None def __str__( self) -> str: '''simple docstring''' return F'''{self.data}''' class _UpperCAmelCase( Generic[T] ): def __init__( self) -> None: '''simple docstring''' _UpperCamelCase = None def __iter__( self) -> Iterator[T]: '''simple docstring''' _UpperCamelCase = self.top while node: yield node.data _UpperCamelCase = node.next def __str__( self) -> str: '''simple docstring''' return "->".join([str(__a) for item in self]) def __len__( self) -> int: '''simple docstring''' return len(tuple(iter(self))) def UpperCAmelCase ( self) -> bool: '''simple docstring''' return self.top is None def UpperCAmelCase ( self , __a) -> None: '''simple docstring''' _UpperCamelCase = Node(__a) if not self.is_empty(): _UpperCamelCase = self.top _UpperCamelCase = node def UpperCAmelCase ( self) -> T: '''simple docstring''' if self.is_empty(): raise IndexError('''pop from empty stack''') assert isinstance(self.top , __a) _UpperCamelCase = self.top _UpperCamelCase = self.top.next return pop_node.data def UpperCAmelCase ( self) -> T: '''simple docstring''' if self.is_empty(): raise IndexError('''peek from empty stack''') assert self.top is not None return self.top.data def UpperCAmelCase ( self) -> None: '''simple docstring''' _UpperCamelCase = None if __name__ == "__main__": from doctest import testmod testmod()
704
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" if openai_config_file == "": _UpperCamelCase = OpenAIGPTConfig() else: _UpperCamelCase = OpenAIGPTConfig.from_json_file(__snake_case ) _UpperCamelCase = OpenAIGPTModel(__snake_case ) # Load weights from numpy load_tf_weights_in_openai_gpt(__snake_case, __snake_case, __snake_case ) # Save pytorch-model _UpperCamelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME _UpperCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict(), __snake_case ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(__snake_case, '''w''', encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) _a = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
78
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _a = { """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTBigCodeForSequenceClassification""", """GPTBigCodeForTokenClassification""", """GPTBigCodeForCausalLM""", """GPTBigCodeModel""", """GPTBigCodePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
705
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _UpperCAmelCase: lowercase__ = MBartConfig lowercase__ = {} lowercase__ = 'gelu' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> Any: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = eos_token_id _UpperCamelCase = pad_token_id _UpperCamelCase = bos_token_id def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) _UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) _UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCamelCase = prepare_mbart_inputs_dict(__a , __a , __a) return config, inputs_dict def UpperCAmelCase ( self , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = TFMBartModel(config=__a).get_decoder() _UpperCamelCase = inputs_dict['''input_ids'''] _UpperCamelCase = input_ids[:1, :] _UpperCamelCase = inputs_dict['''attention_mask'''][:1, :] _UpperCamelCase = inputs_dict['''head_mask'''] _UpperCamelCase = 1 # first forward pass _UpperCamelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a) _UpperCamelCase , _UpperCamelCase = outputs.to_tuple() _UpperCamelCase = past_key_values[1] def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, ) -> Optional[int]: """simple docstring""" if attention_mask is None: _UpperCamelCase = tf.cast(tf.math.not_equal(__snake_case, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: _UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), 
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: _UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowercase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowercase__ = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowercase__ = True lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Dict: '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = TFMBartModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a) def UpperCAmelCase ( self) -> str: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase( unittest.TestCase ): lowercase__ = [ ' UN Chief Says There Is No Military Solution in Syria', ] lowercase__ = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] lowercase__ = 'facebook/mbart-large-en-ro' @cached_property def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def UpperCAmelCase ( self , **__a) -> List[str]: '''simple docstring''' _UpperCamelCase = self.translate_src_text(**__a) self.assertListEqual(self.expected_text , __a) def UpperCAmelCase ( self , **__a) -> Dict: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , **__a , return_tensors='''tf''') _UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2) _UpperCamelCase = self.tokenizer.batch_decode(__a , skip_special_tokens=__a) return generated_words @slow def UpperCAmelCase ( self) -> Any: '''simple docstring''' self._assert_generated_batch_equal_expected()
78
0
"""simple docstring""" import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append(""".""") def lowerCamelCase__ ( __snake_case ) -> Tuple: """simple docstring""" _UpperCamelCase = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '''`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ''' F'''{test_file} instead.''' ) _UpperCamelCase = components[-1] if not test_fn.endswith('''py''' ): raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' ) if not test_fn.startswith('''test_modeling_''' ): raise ValueError( F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' ) _UpperCamelCase = components[:-1] + [test_fn.replace('''.py''', '''''' )] _UpperCamelCase = '''.'''.join(__snake_case ) return test_module_path def lowerCamelCase__ ( __snake_case ) -> List[str]: """simple docstring""" _UpperCamelCase = get_module_path(__snake_case ) _UpperCamelCase = importlib.import_module(__snake_case ) return test_module def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = [] _UpperCamelCase = get_test_module(__snake_case ) for attr in dir(__snake_case ): if attr.endswith('''ModelTester''' ): tester_classes.append(getattr(__snake_case, __snake_case ) ) # sort with class names return sorted(__snake_case, key=lambda __snake_case : x.__name__ ) def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = [] _UpperCamelCase = get_test_module(__snake_case ) for attr in dir(__snake_case ): _UpperCamelCase = getattr(__snake_case, __snake_case ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). _UpperCamelCase = getattr(__snake_case, '''all_model_classes''', [] ) if len(__snake_case ) > 0: test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case, key=lambda __snake_case : x.__name__ ) def lowerCamelCase__ ( __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = get_test_classes(__snake_case ) _UpperCamelCase = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(__snake_case, key=lambda __snake_case : x.__name__ ) def lowerCamelCase__ ( __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = test_class() if hasattr(__snake_case, '''setUp''' ): test.setUp() _UpperCamelCase = None if hasattr(__snake_case, '''model_tester''' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: _UpperCamelCase = test.model_tester.__class__ return model_tester def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = get_test_classes(__snake_case ) _UpperCamelCase = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(__snake_case ) # sort with class names return sorted(__snake_case, key=lambda __snake_case : x.__name__ ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]: """simple docstring""" _UpperCamelCase = get_test_classes_for_model(__snake_case, __snake_case ) _UpperCamelCase = [] for test_class in test_classes: _UpperCamelCase = get_model_tester_from_test_class(__snake_case ) if tester_class is not None: tester_classes.append(__snake_case ) # sort with class names return sorted(__snake_case, key=lambda __snake_case : x.__name__ ) def lowerCamelCase__ ( __snake_case ) -> Tuple: """simple docstring""" _UpperCamelCase = get_test_classes(__snake_case ) _UpperCamelCase = {test_class: get_model_tester_from_test_class(__snake_case ) for test_class in test_classes} return test_tester_mapping def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = get_model_classes(__snake_case ) _UpperCamelCase = { model_class: get_test_classes_for_model(__snake_case, __snake_case ) for model_class in model_classes } return model_test_mapping def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" _UpperCamelCase = get_model_classes(__snake_case ) _UpperCamelCase = { model_class: get_tester_classes_for_model(__snake_case, __snake_case ) for model_class in model_classes } return model_to_tester_mapping def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" if isinstance(__snake_case, __snake_case ): return o elif isinstance(__snake_case, __snake_case ): return o.__name__ elif isinstance(__snake_case, (list, tuple) ): return [to_json(__snake_case ) for x in o] elif isinstance(__snake_case, __snake_case ): return {to_json(__snake_case ): to_json(__snake_case ) for k, v in o.items()} else: return o
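get_module_path above turns a test file path into a dotted module path before importing it. A tiny worked example of that transformation, using a hypothetical path and assuming a POSIX path separator:

import os

test_file = "tests/models/bert/test_modeling_bert.py"    # hypothetical input
components = test_file.split(os.path.sep)                # on POSIX, os.path.sep is "/"
module_path = ".".join(components[:-1] + [components[-1].replace(".py", "")])
assert module_path == "tests.models.bert.test_modeling_bert"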
706
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _a = logging.get_logger(__name__) class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ['pixel_values'] def __init__( self , __a = True , __a = 1 / 2_55 , __a = True , __a = 8 , **__a , ) -> None: '''simple docstring''' super().__init__(**__a) _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_pad _UpperCamelCase = pad_size def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray: '''simple docstring''' return rescale(__a , scale=__a , data_format=__a , **__a) def UpperCAmelCase ( self , __a , __a , __a = None) -> List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = get_image_size(__a) _UpperCamelCase = (old_height // size + 1) * size - old_height _UpperCamelCase = (old_width // size + 1) * size - old_width return pad(__a , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=__a) def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> Tuple: '''simple docstring''' _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_pad if do_pad is not None else self.do_pad _UpperCamelCase = pad_size if pad_size is not None else self.pad_size _UpperCamelCase = make_list_of_images(__a) if not valid_images(__a): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') # All transformations expect numpy arrays. _UpperCamelCase = [to_numpy_array(__a) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images] if do_pad: _UpperCamelCase = [self.pad(__a , size=__a) for image in images] _UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=__a , tensor_type=__a)
78
0
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _a = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ): lowercase__ = DebertaVaTokenizer lowercase__ = DebertaVaTokenizerFast lowercase__ = True lowercase__ = True def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = DebertaVaTokenizer(__a , unk_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = '''this is a test''' _UpperCamelCase = '''this is a test''' return input_text, output_text def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = '''<pad>''' _UpperCamelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a) , __a) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a) , __a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<pad>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''[PAD]''') self.assertEqual(len(__a) , 3_00_01) def UpperCAmelCase ( self) -> Any: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = ''' \tHeLLo!how \n Are yoU? ''' _UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?'''] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''') def UpperCAmelCase ( self) -> Any: '''simple docstring''' pass @unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''') def UpperCAmelCase ( self) -> Dict: '''simple docstring''' pass def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', 
''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = ''' \tHeLLo!how \n Are yoU? 
''' _UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?'''] # fmt: on _UpperCamelCase = DebertaVaTokenizer(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = DebertaVaTokenizerFast(__a , do_lower_case=__a , split_by_punct=__a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.get_tokenizer() _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(__a , add_special_tokens=__a)) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__a , add_special_tokens=__a)) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = self.get_rust_tokenizer() _UpperCamelCase = tokenizer.encode(__a) _UpperCamelCase = rust_tokenizer.encode(__a) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = '''This is a test''' _UpperCamelCase = [13, 1, 43_98, 25, 21, 12_89] _UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test'''] _UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test'''] _UpperCamelCase = DebertaVaTokenizer(__a , keep_accents=__a) _UpperCamelCase = DebertaVaTokenizerFast(__a , keep_accents=__a) _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) # fmt: off _UpperCamelCase = '''I was born in 92000, and this is falsé.''' _UpperCamelCase = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] _UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ] _UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ] # fmt: on _UpperCamelCase = tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.encode(__a , add_special_tokens=__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.tokenize(__a) self.assertListEqual(__a , __a) _UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(__a) self.assertListEqual(__a , __a) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = DebertaVaTokenizer(__a) _UpperCamelCase = 
tokenizer.encode('''sequence builders''') _UpperCamelCase = tokenizer.encode('''multi-sequence build''') _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a) _UpperCamelCase = tokenizer.build_inputs_with_special_tokens(__a , __a) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __a) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __a , ) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = {'''input_ids''': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
707
"""simple docstring""" from importlib import import_module from .logging import get_logger _a = get_logger(__name__) class _UpperCAmelCase: def __init__( self , __a , __a=None) -> Dict: '''simple docstring''' _UpperCamelCase = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__'''): setattr(self , __a , getattr(__a , __a)) _UpperCamelCase = module._original_module if isinstance(__a , _PatchedModuleObj) else module class _UpperCAmelCase: lowercase__ = [] def __init__( self , __a , __a , __a , __a=None) -> List[str]: '''simple docstring''' _UpperCamelCase = obj _UpperCamelCase = target _UpperCamelCase = new _UpperCamelCase = target.split('''.''')[0] _UpperCamelCase = {} _UpperCamelCase = attrs or [] def __enter__( self) -> int: '''simple docstring''' *_UpperCamelCase , _UpperCamelCase = self.target.split('''.''') # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__a)): try: _UpperCamelCase = import_module('''.'''.join(submodules[: i + 1])) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _UpperCamelCase = getattr(self.obj , __a) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__a , _PatchedModuleObj) and obj_attr._original_module is submodule) ): _UpperCamelCase = obj_attr # patch at top level setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs)) _UpperCamelCase = getattr(self.obj , __a) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a) , attrs=self.attrs)) _UpperCamelCase = getattr(__a , __a) # finally set the target attribute setattr(__a , __a , self.new) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _UpperCamelCase = getattr(import_module('''.'''.join(__a)) , __a) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , __a) is attr_value: _UpperCamelCase = getattr(self.obj , __a) setattr(self.obj , __a , self.new) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _UpperCamelCase = globals()['''__builtins__'''][target_attr] setattr(self.obj , __a , self.new) else: raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''') def __exit__( self , *__a) -> Tuple: '''simple docstring''' for attr in list(self.original): setattr(self.obj , __a , self.original.pop(__a)) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' self.__enter__() self._active_patches.append(self) def UpperCAmelCase ( self) -> str: '''simple docstring''' try: self._active_patches.remove(self) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
78
0
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _a : Optional[Any] = ["""small""", """medium""", """large"""] _a : str = """lm_head.decoder.weight""" _a : List[str] = """lm_head.weight""" def lowerCamelCase__ ( __snake_case, __snake_case ) -> int: """simple docstring""" _UpperCamelCase = torch.load(__snake_case ) _UpperCamelCase = d.pop(__snake_case ) os.makedirs(__snake_case, exist_ok=__snake_case ) torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) ) if __name__ == "__main__": _a : List[str] = argparse.ArgumentParser() parser.add_argument("""--dialogpt_path""", default=""".""", type=str) _a : Tuple = parser.parse_args() for MODEL in DIALOGPT_MODELS: _a : Optional[Any] = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""") _a : int = F"""./DialoGPT-{MODEL}""" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
708
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
78
0
"""simple docstring""" import argparse from collections import defaultdict import yaml _a = """docs/source/en/_toctree.yml""" def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = defaultdict(__snake_case ) _UpperCamelCase = [] _UpperCamelCase = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} ) else: new_doc_list.append(__snake_case ) _UpperCamelCase = new_doc_list _UpperCamelCase = [key for key, value in counts.items() if value > 1] _UpperCamelCase = [] for duplicate_key in duplicates: _UpperCamelCase = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} ) if len(__snake_case ) > 1: raise ValueError( F'''{duplicate_key} is present several times in the documentation table of content at ''' '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] ) _UpperCamelCase = sorted(__snake_case, key=lambda __snake_case : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__snake_case ) > 1: raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' ) overview_doc.extend(__snake_case ) # Sort return overview_doc def lowerCamelCase__ ( __snake_case=False ) -> List[str]: """simple docstring""" with open(__snake_case, encoding='''utf-8''' ) as f: _UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc _UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 _UpperCamelCase = content[api_idx]['''sections'''] # Then to the model doc _UpperCamelCase = 0 while api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 _UpperCamelCase = api_doc[scheduler_idx]['''sections'''] _UpperCamelCase = clean_doc_toc(__snake_case ) _UpperCamelCase = False if new_scheduler_doc != scheduler_doc: _UpperCamelCase = True if overwrite: _UpperCamelCase = new_scheduler_doc if diff: if overwrite: _UpperCamelCase = api_doc with open(__snake_case, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(__snake_case, allow_unicode=__snake_case ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) def lowerCamelCase__ ( __snake_case=False ) -> List[Any]: """simple docstring""" with open(__snake_case, encoding='''utf-8''' ) as f: _UpperCamelCase = yaml.safe_load(f.read() ) # Get to the API doc _UpperCamelCase = 0 while content[api_idx]["title"] != "API": api_idx += 1 _UpperCamelCase = content[api_idx]['''sections'''] # Then to the model doc _UpperCamelCase = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 _UpperCamelCase = False _UpperCamelCase = api_doc[pipeline_idx]['''sections'''] _UpperCamelCase = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: _UpperCamelCase = pipeline_doc['''section'''] _UpperCamelCase = clean_doc_toc(__snake_case ) if overwrite: _UpperCamelCase = new_sub_pipeline_doc new_pipeline_docs.append(__snake_case ) # sort overall pipeline doc _UpperCamelCase = clean_doc_toc(__snake_case ) if new_pipeline_docs != pipeline_docs: _UpperCamelCase = True if overwrite: _UpperCamelCase = 
new_pipeline_docs if diff: if overwrite: _UpperCamelCase = api_doc with open(__snake_case, '''w''', encoding='''utf-8''' ) as f: f.write(yaml.dump(__snake_case, allow_unicode=__snake_case ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") _a = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
709
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging _a = logging.get_logger(__name__) _a = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 'gpt_neo' lowercase__ = ['past_key_values'] lowercase__ = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , __a=5_02_57 , __a=20_48 , __a=20_48 , __a=24 , __a=[[["global", "local"], 12]] , __a=16 , __a=None , __a=2_56 , __a="gelu_new" , __a=0.0 , __a=0.0 , __a=0.0 , __a=0.1 , __a=1e-5 , __a=0.02 , __a=True , __a=5_02_56 , __a=5_02_56 , **__a , ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_layers _UpperCamelCase = num_heads _UpperCamelCase = intermediate_size _UpperCamelCase = window_size _UpperCamelCase = activation_function _UpperCamelCase = resid_dropout _UpperCamelCase = embed_dropout _UpperCamelCase = attention_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = layer_norm_epsilon _UpperCamelCase = initializer_range _UpperCamelCase = use_cache _UpperCamelCase = bos_token_id _UpperCamelCase = eos_token_id _UpperCamelCase = attention_types _UpperCamelCase = self.expand_attention_types_params(__a) if len(self.attention_layers) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''') super().__init__(bos_token_id=__a , eos_token_id=__a , **__a) @staticmethod def UpperCAmelCase ( __a) -> int: '''simple docstring''' _UpperCamelCase = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> str: """simple docstring""" import torch _UpperCamelCase = input.size() _UpperCamelCase = len(__snake_case ) _UpperCamelCase = shape[dimension] _UpperCamelCase = torch.arange(0, __snake_case, __snake_case ) _UpperCamelCase = torch.div(sizedim - size, __snake_case, rounding_mode='''floor''' ) + 1 _UpperCamelCase = torch.arange(__snake_case ) + low_indices[:min_length][:, None] _UpperCamelCase = [slice(__snake_case )] * rank _UpperCamelCase = indices _UpperCamelCase = input[s] _UpperCamelCase = list(range(0, rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> str: """simple docstring""" import torch _UpperCamelCase = torch.arange(1, __snake_case ) _UpperCamelCase = torch.remainder(__snake_case, __snake_case ) _UpperCamelCase = remainders == 0 _UpperCamelCase = candidates[divisor_indices] _UpperCamelCase = torch.max(__snake_case ) return largest_divisor, torch.div(__snake_case, __snake_case, rounding_mode='''floor''' ) class _UpperCAmelCase( lowerCamelCase ): @property def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' _UpperCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}}) if self.use_past: self.fill_with_past_key_values_(__a , direction='''inputs''') _UpperCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _UpperCamelCase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return self._config.num_heads def UpperCAmelCase ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]: '''simple docstring''' _UpperCamelCase = super(__a , self).generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a) # We need to order the input in the way they appears in the forward() _UpperCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch _UpperCamelCase , _UpperCamelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _UpperCamelCase = seqlen + 2 _UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCamelCase = [ (torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers) ] _UpperCamelCase = common_inputs['''attention_mask'''] if self.use_past: _UpperCamelCase = ordered_inputs['''attention_mask'''].dtype _UpperCamelCase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__a , __a , dtype=__a)] , dim=1) return ordered_inputs @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return 13
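As a quick illustration of how attention_types is expanded into one attention kind per layer (the expand_attention_types_params logic referenced above), the following standalone sketch reproduces the same loop; the function name here is illustrative, not part of the library:

def expand_attention_types(attention_types):
    # [[["global", "local"], 12]] -> ["global", "local", "global", "local", ...] (24 entries)
    attentions = []
    for pattern, repeat in attention_types:
        for _ in range(repeat):
            attentions.extend(pattern)
    return attentions


layers = expand_attention_types([[["global", "local"], 12]])
assert len(layers) == 24
assert layers[:4] == ["global", "local", "global", "local"]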
78
0
"""simple docstring""" def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" if number < 0: raise ValueError('''number must not be negative''' ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
710
"""simple docstring""" import sys from collections import defaultdict class _UpperCAmelCase: def __init__( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase ( self , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = pos def UpperCAmelCase ( self , __a , __a , __a , __a) -> Tuple: '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCamelCase = 2 * start + 1 else: _UpperCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child] _UpperCamelCase , _UpperCamelCase = ( heap[start], positions[start], ) _UpperCamelCase , _UpperCamelCase = temp, tempa _UpperCamelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , __a) self.top_to_bottom(__a , __a , __a , __a) def UpperCAmelCase ( self , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = position[index] while index != 0: _UpperCamelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCamelCase = heap[parent] _UpperCamelCase = position[parent] self.set_position(position[parent] , __a) else: _UpperCamelCase = val _UpperCamelCase = temp self.set_position(__a , __a) break _UpperCamelCase = parent else: _UpperCamelCase = val _UpperCamelCase = temp self.set_position(__a , 0) def UpperCAmelCase ( self , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = len(__a) // 2 - 1 for i in range(__a , -1 , -1): self.top_to_bottom(__a , __a , len(__a) , __a) def UpperCAmelCase ( self , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = positions[0] _UpperCamelCase = sys.maxsize self.top_to_bottom(__a , 0 , len(__a) , __a) return temp def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = Heap() _UpperCamelCase = [0] * len(__snake_case ) _UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCamelCase = [] for vertex in range(len(__snake_case ) ): distance_tv.append(sys.maxsize ) positions.append(__snake_case ) heap.node_position.append(__snake_case ) _UpperCamelCase = [] _UpperCamelCase = 1 _UpperCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCamelCase = 0 _UpperCamelCase = distance heap.heapify(__snake_case, __snake_case ) for _ in range(1, len(__snake_case ) ): _UpperCamelCase = heap.delete_minimum(__snake_case, __snake_case ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__snake_case )] ): _UpperCamelCase = distance heap.bottom_to_top( __snake_case, heap.get_position(__snake_case ), __snake_case, __snake_case ) _UpperCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _a = int(input("""Enter number of edges: """).strip()) _a = 
defaultdict(list) for _ in range(edges_number): _a = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
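For orientation, a non-interactive sketch of the graph format the script above reads from stdin; the adjacency list maps each vertex to [neighbor, weight] pairs. The graph below is a made-up example, and the quoted result describes its minimum spanning tree:

# Hypothetical 4-vertex undirected weighted graph in the format expected above.
adjacency_list = {
    0: [[1, 1], [2, 4]],
    1: [[0, 1], [2, 2], [3, 6]],
    2: [[0, 4], [1, 2], [3, 3]],
    3: [[1, 6], [2, 3]],
}
# The minimum spanning tree of this graph uses edges (0, 1), (1, 2) and (2, 3),
# for a total weight of 1 + 2 + 3 = 6, which is what prisms_algorithm is meant to return.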
78
0
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore _a = namedtuple("""covid_data""", """cases deaths recovered""") def lowerCamelCase__ ( __snake_case = "https://www.worldometers.info/coronavirus/" ) -> covid_data: """simple docstring""" _UpperCamelCase = '''//div[@class = "maincounter-number"]/span/text()''' return covid_data(*html.fromstring(requests.get(__snake_case ).content ).xpath(__snake_case ) ) _a = """Total COVID-19 cases in the world: {} Total deaths due to COVID-19 in the world: {} Total COVID-19 patients recovered in the world: {}""" print(fmt.format(*covid_stats()))
711
"""simple docstring""" import json import sys def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" with open(__snake_case, encoding='''utf-8''' ) as f: _UpperCamelCase = json.load(__snake_case ) _UpperCamelCase = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' '''] for benchmark_name in sorted(__snake_case ): _UpperCamelCase = results[benchmark_name] _UpperCamelCase = benchmark_name.split('''/''' )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) _UpperCamelCase = '''| metric |''' _UpperCamelCase = '''|--------|''' _UpperCamelCase = '''| new / old (diff) |''' for metric_name in sorted(__snake_case ): _UpperCamelCase = benchmark_res[metric_name] _UpperCamelCase = metric_vals['''new'''] _UpperCamelCase = metric_vals.get('''old''', __snake_case ) _UpperCamelCase = metric_vals.get('''diff''', __snake_case ) _UpperCamelCase = F''' {new_val:f}''' if isinstance(__snake_case, (int, float) ) else '''None''' if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(__snake_case, (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(__snake_case, (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('''</details>''' ) with open(__snake_case, '''w''', encoding='''utf-8''' ) as f: f.writelines('''\n'''.join(__snake_case ) ) if __name__ == "__main__": _a = sys.argv[1] _a = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
78
0
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class _UpperCAmelCase( unittest.TestCase ): def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = 0 def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''') self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__a) / '''preprocessor_config.json''' _UpperCamelCase = Path(__a) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__a , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__a , '''w''')) _UpperCamelCase = AutoImageProcessor.from_pretrained(__a) self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__a) / '''preprocessor_config.json''' _UpperCamelCase = Path(__a) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__a , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__a , '''w''')) _UpperCamelCase = AutoImageProcessor.from_pretrained(__a) self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = CLIPConfig() # Create a dummy config file with image_proceesor_type _UpperCamelCase = Path(__a) / '''preprocessor_config.json''' _UpperCamelCase = Path(__a) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__a , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__a , '''w''')) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _UpperCamelCase = AutoImageProcessor.from_pretrained(__a).to_dict() config_dict.pop('''image_processor_type''') _UpperCamelCase = CLIPImageProcessor(**__a) # save in new folder model_config.save_pretrained(__a) config.save_pretrained(__a) _UpperCamelCase = AutoImageProcessor.from_pretrained(__a) # make sure private variable is not incorrectly saved _UpperCamelCase = json.loads(config.to_json_string()) self.assertTrue('''_processor_class''' not in dict_as_saved) self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> int: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__a) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__a , '''w''') , ) _UpperCamelCase = AutoImageProcessor.from_pretrained(__a) self.assertIsInstance(__a , __a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple 
docstring''' with self.assertRaisesRegex( __a , '''clip-base is not a local folder and is not a valid model identifier'''): _UpperCamelCase = AutoImageProcessor.from_pretrained('''clip-base''') def UpperCAmelCase ( self) -> Dict: '''simple docstring''' with self.assertRaisesRegex( __a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''): _UpperCamelCase = AutoImageProcessor.from_pretrained(__a , revision='''aaaaaa''') def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' with self.assertRaisesRegex( __a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): _UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__a): _UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') # If remote code is disabled, we can't load this config. with self.assertRaises(__a): _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__a) _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__a) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') # Test image processor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a) _UpperCamelCase = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''') def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' try: AutoConfig.register('''custom''' , __a) AutoImageProcessor.register(__a , __a) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__a): AutoImageProcessor.register(__a , __a) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase = Path(__a) / '''preprocessor_config.json''' _UpperCamelCase = Path(__a) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__a , '''w''') , ) json.dump({'''model_type''': '''clip'''} , open(__a , '''w''')) _UpperCamelCase = CustomImageProcessor.from_pretrained(__a) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__a) _UpperCamelCase = AutoImageProcessor.from_pretrained(__a) self.assertIsInstance(__a , __a) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' class _UpperCAmelCase( lowerCamelCase ): lowercase__ = True try: AutoConfig.register('''custom''' , __a) AutoImageProcessor.register(__a , __a) # If remote code is not set, the default is to use local _UpperCamelCase = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''') self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote code is disabled, we load the local one. 
_UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__a) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(image_processor.is_local) # If remote is enabled, we load from the Hub _UpperCamelCase = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__a) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''') self.assertTrue(not hasattr(__a , '''is_local''')) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
712
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def lowerCamelCase__ ( __snake_case, __snake_case=False ) -> Tuple: """simple docstring""" _UpperCamelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCamelCase = '''''' else: _UpperCamelCase = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCamelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCamelCase = in_proj_bias[: config.hidden_size] _UpperCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCamelCase = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] _UpperCamelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCamelCase = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = dct.pop(__snake_case ) _UpperCamelCase = val def lowerCamelCase__ ( ) -> Dict: """simple docstring""" _UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _UpperCamelCase = Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = ViTConfig() _UpperCamelCase = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": _UpperCamelCase = True _UpperCamelCase = int(vit_name[-12:-10] ) _UpperCamelCase = int(vit_name[-9:-6] ) else: _UpperCamelCase = 10_00 _UpperCamelCase = '''huggingface/label-files''' _UpperCamelCase = '''imagenet-1k-id2label.json''' _UpperCamelCase = json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type='''dataset''' ), '''r''' ) ) _UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()} _UpperCamelCase = idalabel _UpperCamelCase = {v: k for k, v in idalabel.items()} _UpperCamelCase = int(vit_name[-6:-4] ) _UpperCamelCase = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): _UpperCamelCase = 1_92 _UpperCamelCase = 7_68 _UpperCamelCase = 12 _UpperCamelCase = 3 elif vit_name[9:].startswith('''small''' ): _UpperCamelCase = 3_84 _UpperCamelCase = 15_36 _UpperCamelCase = 12 _UpperCamelCase = 6 else: pass else: if vit_name[4:].startswith('''small''' ): _UpperCamelCase = 7_68 _UpperCamelCase = 23_04 _UpperCamelCase = 8 _UpperCamelCase = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): _UpperCamelCase = 10_24 _UpperCamelCase = 40_96 _UpperCamelCase = 24 _UpperCamelCase = 16 elif vit_name[4:].startswith('''huge''' ): _UpperCamelCase = 12_80 _UpperCamelCase = 51_20 _UpperCamelCase = 32 _UpperCamelCase = 16 # load original model from timm _UpperCamelCase = timm.create_model(__snake_case, pretrained=__snake_case ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCamelCase = timm_model.state_dict() if base_model: remove_classification_head_(__snake_case ) _UpperCamelCase = create_rename_keys(__snake_case, __snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCamelCase = ViTModel(__snake_case ).eval() else: _UpperCamelCase = ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: _UpperCamelCase = DeiTImageProcessor(size=config.image_size ) else: _UpperCamelCase = ViTImageProcessor(size=config.image_size ) _UpperCamelCase = image_processor(images=prepare_img(), return_tensors='''pt''' ) _UpperCamelCase = encoding['''pixel_values'''] _UpperCamelCase = model(__snake_case ) if base_model: _UpperCamelCase = 
timm_model.forward_features(__snake_case ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__snake_case, outputs.pooler_output, atol=1e-3 ) else: _UpperCamelCase = timm_model(__snake_case ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1e-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_patch16_224""", type=str, help="""Name of the ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _a = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
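read_in_q_k_v above slices timm's fused qkv projection into separate query, key and value weights. A small numpy sketch of that slicing, with an illustrative hidden size:

import numpy as np

hidden_size = 4                                   # illustrative; ViT-Base uses 768
in_proj_weight = np.arange(3 * hidden_size * hidden_size).reshape(3 * hidden_size, hidden_size)

query = in_proj_weight[:hidden_size, :]                   # first third of the rows
key = in_proj_weight[hidden_size : 2 * hidden_size, :]    # middle third
value = in_proj_weight[-hidden_size:, :]                  # last third
assert query.shape == key.shape == value.shape == (hidden_size, hidden_size)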
78
0
"""simple docstring""" import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList _a = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class a_( lowerCamelCase ): def __init__( self , __a , __a , __a=None , __a=1) -> Dict: '''simple docstring''' _UpperCamelCase = tokenizer _UpperCamelCase = dataset _UpperCamelCase = len(__a) if n_tasks is None else n_tasks _UpperCamelCase = n_copies def __iter__( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] for task in range(self.n_tasks): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip()) _UpperCamelCase = self.tokenizer(__a , padding=__a , return_tensors='''pt''') for task in range(self.n_tasks): for _ in range(self.n_copies): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class a_( lowerCamelCase ): def __init__( self , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = start_length _UpperCamelCase = eof_strings _UpperCamelCase = tokenizer def __call__( self , __a , __a , **__a) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.tokenizer.batch_decode(input_ids[:, self.start_length :]) _UpperCamelCase = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings)) return all(__a) def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = re.split('''(%s)''' % '''|'''.join(__snake_case ), __snake_case ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case=20, **__snake_case ) -> Any: """simple docstring""" _UpperCamelCase = defaultdict(__snake_case ) # dict of list of generated tokens for step, batch in tqdm(enumerate(__snake_case ) ): with torch.no_grad(): _UpperCamelCase = batch['''ids'''].shape[-1] _UpperCamelCase = accelerator.unwrap_model(__snake_case ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=__snake_case, **__snake_case ) # each task is generated batch_size times _UpperCamelCase = batch['''task_id'''].repeat(__snake_case ) _UpperCamelCase = accelerator.pad_across_processes( __snake_case, dim=1, pad_index=tokenizer.pad_token_id ) _UpperCamelCase , _UpperCamelCase = accelerator.gather((generated_tokens, generated_tasks) ) _UpperCamelCase = generated_tokens.cpu().numpy() _UpperCamelCase = generated_tasks.cpu().numpy() for task, generated_tokens in zip(__snake_case, __snake_case ): gen_token_dict[task].append(__snake_case ) _UpperCamelCase = [[] for _ in range(__snake_case )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: _UpperCamelCase = tokenizer.decode(__snake_case, skip_special_tokens=__snake_case, clean_up_tokenization_spaces=__snake_case ) code_gens[task].append(remove_last_block(__snake_case ) ) return code_gens def lowerCamelCase__ ( ) -> Optional[Any]: 
"""simple docstring""" _UpperCamelCase = HfArgumentParser(__snake_case ) _UpperCamelCase = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric _UpperCamelCase = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing _UpperCamelCase = '''false''' if args.num_workers is None: _UpperCamelCase = multiprocessing.cpu_count() # Use dataset load to feed to accelerate _UpperCamelCase = Accelerator() set_seed(args.seed, device_specific=__snake_case ) # Load model and tokenizer _UpperCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt ) _UpperCamelCase = tokenizer.eos_token _UpperCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings _UpperCamelCase = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0, __snake_case, __snake_case )] ), } # Load evaluation dataset and metric _UpperCamelCase = load_dataset('''openai_humaneval''' ) _UpperCamelCase = load_metric('''code_eval''' ) _UpperCamelCase = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) _UpperCamelCase = args.n_samples // args.batch_size _UpperCamelCase = TokenizedDataset(__snake_case, human_eval['''test'''], n_copies=__snake_case, n_tasks=__snake_case ) # do not confuse args.batch_size, which is actually the num_return_sequences _UpperCamelCase = DataLoader(__snake_case, batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: _UpperCamelCase = code_eval_metric.compute(references=[''''''], predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception _UpperCamelCase , _UpperCamelCase = accelerator.prepare(__snake_case, __snake_case ) _UpperCamelCase = complete_code( __snake_case, __snake_case, __snake_case, __snake_case, n_tasks=__snake_case, batch_size=args.batch_size, **__snake_case, ) if accelerator.is_main_process: _UpperCamelCase = [] for task in tqdm(range(__snake_case ) ): _UpperCamelCase = human_eval['''test'''][task]['''test'''] _UpperCamelCase = F'''check({human_eval["test"][task]["entry_point"]})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric _UpperCamelCase , _UpperCamelCase = code_eval_metric.compute( references=__snake_case, predictions=__snake_case, num_workers=args.num_workers ) print(F'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file, '''w''' ) as fp: json.dump(__snake_case, __snake_case ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
713
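The truncation helper in the evaluation script above splits each generated completion on the stop strings and drops the trailing, usually unfinished, block. A self-contained restatement with an illustrative completion:

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string):
    # Split on any stop sequence and drop the last (incomplete) block,
    # mirroring the helper defined in the evaluation script.
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])

completion = "def add(a, b):\n    return a + b\nprint(add(1, 2))"
print(remove_last_block(completion))  # keeps only the completed function body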
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=16 , __a=36 , __a=6 , __a=6 , __a=6 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = embedding_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_hidden_groups _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = scope def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = 
model(__a , attention_mask=__a , token_type_ids=__a) _UpperCamelCase = model(__a , token_type_ids=__a) _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = AlbertForPreTraining(config=__a) model.to(__a) model.eval() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , sentence_order_label=__a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertForMaskedLM(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = AlbertForQuestionAnswering(config=__a) model.to(__a) model.eval() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = AlbertForSequenceClassification(__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = AlbertForTokenClassification(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.num_choices _UpperCamelCase = AlbertForMultipleChoice(config=__a) model.to(__a) model.eval() _UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': 
input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowercase__ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = True def UpperCAmelCase ( self , __a , __a , __a=False) -> List[str]: '''simple docstring''' _UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class in get_values(__a): _UpperCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a) _UpperCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a) return inputs_dict def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = AlbertModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCamelCase = type self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AlbertModel.from_pretrained(__a) self.assertIsNotNone(__a) @require_torch class _UpperCAmelCase( unittest.TestCase ): @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertModel.from_pretrained('''albert-base-v2''') _UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]]) _UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): _UpperCamelCase = model(__a , attention_mask=__a)[0] _UpperCamelCase = torch.Size((1, 11, 7_68)) self.assertEqual(output.shape , __a) 
_UpperCamelCase = torch.tensor(
    [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
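The integration test above pins a hidden-state slice for albert-base-v2; a minimal standalone version of the same check, verifying only the output shape (Hub access is assumed for downloading the weights):

import torch
from transformers import AlbertModel

model = AlbertModel.from_pretrained("albert-base-v2")
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = torch.ones_like(input_ids)
with torch.no_grad():
    last_hidden_state = model(input_ids, attention_mask=attention_mask)[0]
print(last_hidden_state.shape)  # torch.Size([1, 11, 768])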
78
0
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=False , __a=True , __a=99 , __a=64 , __a=5 , __a=4 , __a=64 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Optional[int]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = scope def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' return MPNetConfig.from_pretrained('''microsoft/mpnet-base''') def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = MPNetModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , __a) _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a) -> str: '''simple docstring''' _UpperCamelCase = MPNetForQuestionAnswering(config=__a) 
model.to(__a) model.eval() _UpperCamelCase = model( __a , attention_mask=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a) -> Tuple: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = MPNetForSequenceClassification(__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = self.num_choices _UpperCamelCase = MPNetForMultipleChoice(config=__a) model.to(__a) model.eval() _UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = model( __a , attention_mask=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = MPNetForTokenClassification(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ((_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase) , (_UpperCamelCase)) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) lowercase__ = ( { 'feature-extraction': MPNetModel, 'fill-mask': MPNetForMaskedLM, 'question-answering': MPNetForQuestionAnswering, 'text-classification': MPNetForSequenceClassification, 'token-classification': MPNetForTokenClassification, 'zero-shot': MPNetForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = False lowercase__ = True def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = MPNetModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__a) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_mpnet_for_token_classification(*__a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__a) @require_torch class _UpperCAmelCase( unittest.TestCase ): @slow def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = MPNetModel.from_pretrained('''microsoft/mpnet-base''') _UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]]) _UpperCamelCase = model(__a)[0] _UpperCamelCase = torch.Size((1, 11, 7_68)) self.assertEqual(output.shape , __a) _UpperCamelCase = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4))
714
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = np.inf def set_batch_size(__snake_case ) -> None: nonlocal batch_size if isinstance(__snake_case, __snake_case ): _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__snake_case, __snake_case ): _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__snake_case, __snake_case ) and feature.dtype == "binary": _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__snake_case, __snake_case ) return None if batch_size is np.inf else batch_size class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> Dict: '''simple docstring''' super().__init__( __a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , ) _UpperCamelCase = path_or_paths if isinstance(__a , __a) else {self.split: path_or_paths} _UpperCamelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCamelCase = Parquet( cache_dir=__a , data_files=__a , features=__a , hash=__a , **__a , ) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__a , in_memory=self.keep_in_memory) return dataset class _UpperCAmelCase: def __init__( self , __a , __a , __a = None , **__a , ) -> Dict: '''simple docstring''' _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size or get_writer_batch_size(dataset.features) _UpperCamelCase = parquet_writer_kwargs def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: _UpperCamelCase = self._write(file_obj=__a , batch_size=__a , **self.parquet_writer_kwargs) else: _UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__a , **self.parquet_writer_kwargs) return written def UpperCAmelCase ( self , __a , __a , **__a) -> int: '''simple docstring''' _UpperCamelCase = 0 _UpperCamelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __a) _UpperCamelCase = self.dataset.features.arrow_schema _UpperCamelCase = pq.ParquetWriter(__a , schema=__a , **__a) for offset in logging.tqdm( range(0 , len(self.dataset) , __a) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating 
parquet from Arrow format''' , ):
    _UpperCamelCase = query_table(
        table=self.dataset._data , key=slice(__a , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
    writer.write_table(__a)
    written += batch.nbytes
writer.close()
return written
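In the datasets library this writer backs Dataset.to_parquet, with the row-group size chosen by get_writer_batch_size; a small end-to-end sketch (the file name is illustrative):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
# Writes the table batch by batch, as in the _write method above,
# and returns the number of bytes written.
num_bytes = ds.to_parquet("example.parquet")
print(num_bytes)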
78
0
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _a = logging.get_logger(__name__) _a = """▁""" _a = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", } _a = { """vocab_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json""" ), }, """spm_file""": { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model""" ) }, } _a = { """facebook/s2t-small-librispeech-asr""": 1024, } _a = ["""pt""", """fr""", """ru""", """nl""", """ro""", """it""", """es""", """de"""] _a = {"""mustc""": MUSTC_LANGS} class _UpperCAmelCase( lowerCamelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = MAX_MODEL_INPUT_SIZES lowercase__ = ['input_ids', 'attention_mask'] lowercase__ = [] def __init__( self , __a , __a , __a="<s>" , __a="</s>" , __a="<pad>" , __a="<unk>" , __a=False , __a=False , __a=None , __a=None , __a = None , **__a , ) -> None: '''simple docstring''' _UpperCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__a , eos_token=__a , unk_token=__a , pad_token=__a , do_upper_case=__a , do_lower_case=__a , tgt_lang=__a , lang_codes=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) _UpperCamelCase = do_upper_case _UpperCamelCase = do_lower_case _UpperCamelCase = load_json(__a) _UpperCamelCase = {v: k for k, v in self.encoder.items()} _UpperCamelCase = spm_file _UpperCamelCase = load_spm(__a , self.sp_model_kwargs) if lang_codes is not None: _UpperCamelCase = lang_codes _UpperCamelCase = LANGUAGES[lang_codes] _UpperCamelCase = [F'''<lang:{lang}>''' for lang in self.langs] _UpperCamelCase = {lang: self.sp_model.PieceToId(F'''<lang:{lang}>''') for lang in self.langs} _UpperCamelCase = self.lang_tokens _UpperCamelCase = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang) else: _UpperCamelCase = {} @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return len(self.encoder) @property def UpperCAmelCase ( self) -> str: '''simple docstring''' return self._tgt_lang @tgt_lang.setter def UpperCAmelCase ( self , __a) -> None: '''simple docstring''' _UpperCamelCase = new_tgt_lang self.set_tgt_lang_special_tokens(__a) def UpperCAmelCase ( self , __a) -> None: '''simple docstring''' _UpperCamelCase = self.lang_code_to_id[tgt_lang] _UpperCamelCase = [lang_code_id] def UpperCAmelCase ( self , __a) -> List[str]: '''simple docstring''' return self.sp_model.encode(__a , out_type=__a) def UpperCAmelCase ( self , __a) -> Optional[int]: '''simple docstring''' return self.encoder.get(__a , self.encoder[self.unk_token]) def UpperCAmelCase ( self , __a) -> str: '''simple docstring''' return self.decoder.get(__a , self.unk_token) def UpperCAmelCase ( self , __a) -> str: '''simple docstring''' _UpperCamelCase = [] _UpperCamelCase = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: _UpperCamelCase = self.sp_model.decode(__a) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " _UpperCamelCase = [] else: current_sub_tokens.append(__a) _UpperCamelCase = 
self.sp_model.decode(__a) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def UpperCAmelCase ( self , __a , __a=None) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self , __a , __a = None , __a = False) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a) _UpperCamelCase = [1] * len(self.prefix_tokens) _UpperCamelCase = [1] if token_ids_a is None: return prefix_ones + ([0] * len(__a)) + suffix_ones return prefix_ones + ([0] * len(__a)) + ([0] * len(__a)) + suffix_ones def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.encoder.copy() vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.__dict__.copy() _UpperCamelCase = None return state def __setstate__( self , __a) -> None: '''simple docstring''' _UpperCamelCase = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs'''): _UpperCamelCase = {} _UpperCamelCase = load_spm(self.spm_file , self.sp_model_kwargs) def UpperCAmelCase ( self , __a , __a = None) -> Tuple[str]: '''simple docstring''' _UpperCamelCase = Path(__a) assert save_dir.is_dir(), F'''{save_directory} should be a directory''' _UpperCamelCase = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file'''] ) _UpperCamelCase = save_dir / ( (filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file'''] ) save_json(self.encoder , __a) if os.path.abspath(self.spm_file) != os.path.abspath(__a) and os.path.isfile(self.spm_file): copyfile(self.spm_file , __a) elif not os.path.isfile(self.spm_file): with open(__a , '''wb''') as fi: _UpperCamelCase = self.sp_model.serialized_model_proto() fi.write(__a) return (str(__a), str(__a)) def lowerCamelCase__ ( __snake_case, __snake_case ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" _UpperCamelCase = sentencepiece.SentencePieceProcessor(**__snake_case ) spm.Load(str(__snake_case ) ) return spm def lowerCamelCase__ ( __snake_case ) -> Union[Dict, List]: """simple docstring""" with open(__snake_case, '''r''' ) as f: return json.load(__snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> None: """simple docstring""" with open(__snake_case, '''w''' ) as f: json.dump(__snake_case, __snake_case, indent=2 )
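The tokenizer above corresponds to Speech2TextTokenizer; a quick encode/decode round trip using the checkpoint named in its pretrained map (Hub access assumed):

from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids
print(ids)
print(tokenizer.decode(ids, skip_special_tokens=True))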
715
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=None , __a=True , ) -> int: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 20} _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_flip_channel_order def UpperCAmelCase ( self) -> str: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ): lowercase__ = MobileViTImageProcessor if is_vision_available() else None def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = MobileViTImageProcessingTester(self) @property def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , '''do_resize''')) self.assertTrue(hasattr(__a , '''size''')) self.assertTrue(hasattr(__a , '''do_center_crop''')) self.assertTrue(hasattr(__a , '''center_crop''')) self.assertTrue(hasattr(__a , '''do_flip_channel_order''')) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 20}) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {'''shortest_edge''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' pass def UpperCAmelCase ( self) -> str: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , 
return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase ( self) -> int: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
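What the tests above assert, condensed into one runnable snippet: a single PIL image is resized, center-cropped and returned as a (1, num_channels, crop_height, crop_width) tensor. The sizes mirror the tester defaults; torch and Pillow are assumed to be installed.

import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

image = Image.fromarray(np.random.randint(0, 255, (30, 30, 3), dtype=np.uint8))
image_processor = MobileViTImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])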
78
0
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _a = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""") @require_sentencepiece @require_tokenizers class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ): lowercase__ = GPTSwaTokenizer lowercase__ = False lowercase__ = True lowercase__ = False def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase = GPTSwaTokenizer(__a , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''') tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = '''This is a test''' _UpperCamelCase = '''This is a test''' return input_text, output_text def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = '''<s>''' _UpperCamelCase = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a) , __a) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a) , __a) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<unk>''') self.assertEqual(vocab_keys[1] , '''<s>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(__a) , 20_00) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 20_00) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer(__a) _UpperCamelCase = tokenizer.tokenize('''This is a test''') self.assertListEqual(__a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a) , [4_65, 2_87, 2_65, 6_31, 8_42]) _UpperCamelCase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') # fmt: off self.assertListEqual( __a , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , ) # fmt: on _UpperCamelCase = tokenizer.convert_tokens_to_ids(__a) self.assertListEqual( __a , [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60] , ) _UpperCamelCase = tokenizer.convert_ids_to_tokens(__a) # fmt: off self.assertListEqual( __a , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.''']) # fmt: on def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = GPTSwaTokenizer(__a) _UpperCamelCase = ['''This is a test''', '''I was born in 92000, and this is falsé.'''] _UpperCamelCase = [ [4_65, 2_87, 2_65, 6_31, 8_42], [2_62, 2_72, 15_25, 2_86, 2_71, 2_68, 60, 9_16, 6_33, 6_33, 6_33, 2_59, 2_66, 3_01, 2_87, 3_84, 3_67, 2_63, 1_98, 1_72, 2_60], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(__a , __a): self.assertListEqual(tokenizer.encode_fast(__a) , __a) # Test that decode_fast returns the input text for text, token_ids in zip(__a , __a): 
self.assertEqual(tokenizer.decode_fast(__a) , __a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = [ '''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''', '''Hey there, how are you doing this fine day?''', '''This is a text with a trailing spaces followed by a dot .''', '''Häj sväjs lillebrör! =)''', '''Det är inget fel på Mr. Cool''', ] # fmt: off _UpperCamelCase = {'''input_ids''': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__a , )
716
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ['image_processor', 'tokenizer'] lowercase__ = 'OwlViTImageProcessor' lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self , __a=None , __a=None , **__a) -> List[Any]: '''simple docstring''' _UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __a , ) _UpperCamelCase = kwargs.pop('''feature_extractor''') _UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(__a , __a) def __call__( self , __a=None , __a=None , __a=None , __a="max_length" , __a="np" , **__a) -> List[str]: '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''') if text is not None: if isinstance(__a , __a) or (isinstance(__a , __a) and not isinstance(text[0] , __a)): _UpperCamelCase = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a)] elif isinstance(__a , __a) and isinstance(text[0] , __a): _UpperCamelCase = [] # Maximum number of queries across batch _UpperCamelCase = max([len(__a) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__a) != max_num_queries: _UpperCamelCase = t + [''' '''] * (max_num_queries - len(__a)) _UpperCamelCase = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a) encodings.append(__a) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''') if return_tensors == "np": _UpperCamelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _UpperCamelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _UpperCamelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0) _UpperCamelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _UpperCamelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0) else: raise ValueError('''Target return tensor type could not be returned''') _UpperCamelCase = BatchEncoding() _UpperCamelCase = input_ids _UpperCamelCase = attention_mask if query_images is not None: _UpperCamelCase = BatchEncoding() _UpperCamelCase = self.image_processor( __a , return_tensors=__a , **__a).pixel_values _UpperCamelCase = query_pixel_values if images is not None: _UpperCamelCase = self.image_processor(__a , 
return_tensors=__a , **__a) if text is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__a) , tensor_type=__a) def UpperCAmelCase ( self , *__a , **__a) -> str: '''simple docstring''' return self.image_processor.post_process(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Dict: '''simple docstring''' return self.image_processor.post_process_object_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[Any]: '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[int]: '''simple docstring''' return self.tokenizer.batch_decode(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*__a , **__a) @property def UpperCAmelCase ( self) -> str: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , ) return self.image_processor_class @property def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , ) return self.image_processor
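A short sketch of the processor above in its usual zero-shot detection role; the checkpoint name is an assumption for illustration and the image is a public COCO sample (network access assumed):

import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(
    text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt"
)
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']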
78
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_multi_gpu from accelerate.utils import patch_environment class _UpperCAmelCase( unittest.TestCase ): def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = inspect.getfile(accelerate.test_utils) _UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_script.py''']) _UpperCamelCase = os.path.sep.join( mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_distributed_data_loop.py''']) _UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_ops.py''']) @require_multi_gpu def UpperCAmelCase ( self) -> int: '''simple docstring''' print(F'''Found {torch.cuda.device_count()} devices.''') _UpperCamelCase = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path] with patch_environment(omp_num_threads=1): execute_subprocess_async(__a , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase ( self) -> str: '''simple docstring''' print(F'''Found {torch.cuda.device_count()} devices.''') _UpperCamelCase = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path] print(F'''Command: {cmd}''') with patch_environment(omp_num_threads=1): execute_subprocess_async(__a , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)] with patch_environment(omp_num_threads=1): execute_subprocess_async(__a , env=os.environ.copy()) @require_multi_gpu def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' print(F'''Found {torch.cuda.device_count()} devices, using 2 devices only''') _UpperCamelCase = ['''torchrun''', F'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path] with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1'''): execute_subprocess_async(__a , env=os.environ.copy()) if __name__ == "__main__": _a = Accelerator() _a = (accelerator.state.process_index + 2, 10) _a = torch.randint(0, 10, shape).to(accelerator.device) _a = """""" _a = accelerator.pad_across_processes(tensor) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0): error_msg += "Padding was not done with the right value (0)." _a = accelerator.pad_across_processes(tensor, pad_first=True) if tensora.shape[0] != accelerator.state.num_processes + 1: error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0." _a = accelerator.state.num_processes - accelerator.state.process_index - 1 if not torch.equal(tensora[index:], tensor): error_msg += "Tensors have different values." if not torch.all(tensora[:index] == 0): error_msg += "Padding was not done with the right value (0)." # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
717
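A single-process illustration of the padding semantics the multi-GPU script above checks: tensors whose first dimension differs across ranks are zero-padded to the largest size, optionally with the padding placed in front. The helper below is a local stand-in for accelerator.pad_across_processes, not the accelerate implementation.

import torch

def pad_to(tensor, target_dim0, pad_first=False):
    # Pad dim 0 with zeros up to target_dim0, keeping the original values.
    pad = torch.zeros(
        target_dim0 - tensor.shape[0], *tensor.shape[1:], dtype=tensor.dtype
    )
    return torch.cat([pad, tensor] if pad_first else [tensor, pad], dim=0)

tensor = torch.randint(0, 10, (2, 10))
padded = pad_to(tensor, 4)
print(padded.shape)                     # torch.Size([4, 10])
print(torch.equal(padded[:2], tensor))  # True: original values kept in front
print(int(pad_to(tensor, 4, pad_first=True)[:2].sum()))  # 0: leading rows are padding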
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _a = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["""PerceiverFeatureExtractor"""] _a = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
0
"""simple docstring""" import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification _a = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co _a = """main""" # Default branch name _a = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) _a = """aaaaaaa""" # This commit does not exist, so we should 404. _a = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes _a = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def lowerCamelCase__ ( ) -> str: """simple docstring""" print('''Welcome!''' ) yield print('''Bye!''' ) @contextlib.contextmanager def lowerCamelCase__ ( ) -> List[Any]: """simple docstring""" print('''Bonjour!''' ) yield print('''Au revoir!''' ) class _UpperCAmelCase( unittest.TestCase ): def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' assert transformers.__spec__ is not None assert importlib.util.find_spec('''transformers''') is not None class _UpperCAmelCase( unittest.TestCase ): @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO) def UpperCAmelCase ( self , __a) -> Dict: '''simple docstring''' with ContextManagers([]): print('''Transformers are awesome!''') # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''') @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO) def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' with ContextManagers([context_en()]): print('''Transformers are awesome!''') # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''') @unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO) def UpperCAmelCase ( self , __a) -> List[Any]: '''simple docstring''' with ContextManagers([context_fr(), context_en()]): print('''Transformers are awesome!''') # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''') @require_torch def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' self.assertEqual(find_labels(__a) , ['''labels''']) self.assertEqual(find_labels(__a) , ['''labels''', '''next_sentence_label''']) self.assertEqual(find_labels(__a) , ['''start_positions''', '''end_positions''']) class _UpperCAmelCase( lowerCamelCase ): pass self.assertEqual(find_labels(__a) , ['''labels''']) @require_tf def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' self.assertEqual(find_labels(__a) , ['''labels''']) self.assertEqual(find_labels(__a) , ['''labels''', 
'''next_sentence_label''']) self.assertEqual(find_labels(__a) , ['''start_positions''', '''end_positions''']) class _UpperCAmelCase( lowerCamelCase ): pass self.assertEqual(find_labels(__a) , ['''labels''']) @require_flax def UpperCAmelCase ( self) -> str: '''simple docstring''' self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) self.assertEqual(find_labels(__a) , []) class _UpperCAmelCase( lowerCamelCase ): pass self.assertEqual(find_labels(__a) , [])
718
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=2 , __a=24 , __a=16 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=None , __a=2 , __a=2 , ) -> List[str]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = patch_size _UpperCamelCase = max_length _UpperCamelCase = num_mel_bins _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope _UpperCamelCase = frequency_stride _UpperCamelCase = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1 _UpperCamelCase = frequency_out_dimension * time_out_dimension _UpperCamelCase = num_patches + 2 def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = self.get_config() return config, input_values, labels def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ASTModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( 
_UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_values''': input_values} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowercase__ = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = ASTModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37) def UpperCAmelCase ( self) -> int: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) _UpperCamelCase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''input_values'''] self.assertListEqual(arg_names[:1] , __a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = ASTModel.from_pretrained(__a) self.assertIsNotNone(__a) def lowerCamelCase__ ( ) -> List[str]: """simple docstring""" _UpperCamelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' ) _UpperCamelCase , _UpperCamelCase = torchaudio.load(__snake_case ) return audio, sampling_rate @require_torch @require_torchaudio class _UpperCAmelCase( unittest.TestCase ): @cached_property def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''') if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.default_feature_extractor _UpperCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(__a) _UpperCamelCase = self.default_feature_extractor _UpperCamelCase , _UpperCamelCase = prepare_audio() _UpperCamelCase = audio.squeeze().numpy() _UpperCamelCase = feature_extractor(__a , sampling_rate=__a , return_tensors='''pt''').to(__a) # forward 
pass with torch.no_grad(): _UpperCamelCase = model(**__a) # verify the logits _UpperCamelCase = torch.Size((1, 5_27)) self.assertEqual(outputs.logits.shape , __a) _UpperCamelCase = torch.tensor([-0.8760, -7.0042, -8.6602]).to(__a) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
78
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _a = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
719
"""simple docstring""" def lowerCamelCase__ ( ) -> list[list[int]]: """simple docstring""" return [list(range(10_00 - i, -10_00 - i, -1 ) ) for i in range(10_00 )] _a = generate_large_matrix() _a = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def lowerCamelCase__ ( __snake_case ) -> None: """simple docstring""" assert all(row == sorted(__snake_case, reverse=__snake_case ) for row in grid ) assert all(list(__snake_case ) == sorted(__snake_case, reverse=__snake_case ) for col in zip(*__snake_case ) ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 _UpperCamelCase = len(__snake_case ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _UpperCamelCase = (left + right) // 2 _UpperCamelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _UpperCamelCase = mid + 1 else: _UpperCamelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__snake_case ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 _UpperCamelCase = len(grid[0] ) for i in range(len(__snake_case ) ): _UpperCamelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(__snake_case ) * len(grid[0] )) - total def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 for row in grid: for i, number in enumerate(__snake_case ): if number < 0: total += len(__snake_case ) - i break return total def lowerCamelCase__ ( ) -> None: """simple docstring""" from timeit import timeit print('''Running benchmarks''' ) _UpperCamelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _UpperCamelCase = timeit(F'''{func}(grid=grid)''', setup=__snake_case, number=5_00 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
78
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _a = 16 _a = 32 def lowerCamelCase__ ( __snake_case, __snake_case = 16 ) -> Optional[int]: _UpperCamelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCamelCase = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(__snake_case ): # max_length=None => use the model max length (it's actually the default) _UpperCamelCase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=__snake_case, max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCamelCase = datasets.map( __snake_case, batched=__snake_case, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCamelCase = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(__snake_case ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCamelCase = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCamelCase = 16 elif accelerator.mixed_precision != "no": _UpperCamelCase = 8 else: _UpperCamelCase = None return tokenizer.pad( __snake_case, padding='''longest''', max_length=__snake_case, pad_to_multiple_of=__snake_case, return_tensors='''pt''', ) # Instantiate dataloaders. 
_UpperCamelCase = DataLoader( tokenized_datasets['''train'''], shuffle=__snake_case, collate_fn=__snake_case, batch_size=__snake_case ) _UpperCamelCase = DataLoader( tokenized_datasets['''validation'''], shuffle=__snake_case, collate_fn=__snake_case, batch_size=__snake_case ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _a = mocked_dataloaders # noqa: F811 def lowerCamelCase__ ( __snake_case, __snake_case ) -> int: if os.environ.get('''TESTING_MOCKED_DATALOADERS''', __snake_case ) == "1": _UpperCamelCase = 2 # New Code # _UpperCamelCase = int(args.gradient_accumulation_steps ) # Initialize accelerator _UpperCamelCase = Accelerator( cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=__snake_case ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( '''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCamelCase = config['''lr'''] _UpperCamelCase = int(config['''num_epochs'''] ) _UpperCamelCase = int(config['''seed'''] ) _UpperCamelCase = int(config['''batch_size'''] ) _UpperCamelCase = evaluate.load('''glue''', '''mrpc''' ) set_seed(__snake_case ) _UpperCamelCase , _UpperCamelCase = get_dataloaders(__snake_case, __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCamelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''', return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCamelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCamelCase = AdamW(params=model.parameters(), lr=__snake_case ) # Instantiate scheduler _UpperCamelCase = get_linear_schedule_with_warmup( optimizer=__snake_case, num_warmup_steps=1_00, num_training_steps=(len(__snake_case ) * num_epochs), ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = accelerator.prepare( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. with accelerator.accumulate(__snake_case ): _UpperCamelCase = model(**__snake_case ) _UpperCamelCase = output.loss accelerator.backward(__snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _UpperCamelCase = model(**__snake_case ) _UpperCamelCase = outputs.logits.argmax(dim=-1 ) _UpperCamelCase , _UpperCamelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__snake_case, references=__snake_case, ) _UpperCamelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''', __snake_case ) def lowerCamelCase__ ( ) -> str: _UpperCamelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''', type=__snake_case, default=__snake_case, choices=['''no''', '''fp16''', '''bf16''', '''fp8'''], help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''', ) # New Code # parser.add_argument( '''--gradient_accumulation_steps''', type=__snake_case, default=1, help='''The number of minibatches to be ran before gradients are accumulated.''', ) parser.add_argument('''--cpu''', action='''store_true''', help='''If passed, will train on the CPU.''' ) _UpperCamelCase = parser.parse_args() _UpperCamelCase = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(__snake_case, __snake_case ) if __name__ == "__main__": main()
720
"""simple docstring""" import copy import re class _UpperCAmelCase: lowercase__ = 'hp' lowercase__ = {} lowercase__ = None @classmethod def UpperCAmelCase ( cls , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = prefix _UpperCamelCase = defaults cls.build_naming_info() @staticmethod def UpperCAmelCase ( __a , __a) -> Union[str, Any]: '''simple docstring''' if len(__a) == 0: return "" _UpperCamelCase = None if any(char.isdigit() for char in word): raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a) + 1): _UpperCamelCase = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _UpperCamelCase = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a): _UpperCamelCase = '''''' while integer != 0: _UpperCamelCase = chr(ord('''A''') + integer % 10) + s integer //= 10 return s _UpperCamelCase = 0 while True: _UpperCamelCase = word + '''#''' + int_to_alphabetic(__a) if sword in info["reverse_short_word"]: continue else: _UpperCamelCase = sword break _UpperCamelCase = short_word _UpperCamelCase = word return short_word @staticmethod def UpperCAmelCase ( __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = param_name.split('''_''') _UpperCamelCase = [TrialShortNamer.shortname_for_word(__a , __a) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _UpperCamelCase = ['''''', '''_'''] for separator in separators: _UpperCamelCase = separator.join(__a) if shortname not in info["reverse_short_param"]: _UpperCamelCase = shortname _UpperCamelCase = param_name return shortname return param_name @staticmethod def UpperCAmelCase ( __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = TrialShortNamer.shortname_for_key(__a , __a) _UpperCamelCase = short_name _UpperCamelCase = param_name @classmethod def UpperCAmelCase ( cls) -> Any: '''simple docstring''' if cls.NAMING_INFO is not None: return _UpperCamelCase = { '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } _UpperCamelCase = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(__a , __a) _UpperCamelCase = info @classmethod def UpperCAmelCase ( cls , __a) -> Optional[Any]: '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None _UpperCamelCase = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'''You should provide a default value for the param name {k} with value {v}''') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _UpperCamelCase = cls.NAMING_INFO['''short_param'''][k] if isinstance(__a , __a): _UpperCamelCase = 1 if v else 0 _UpperCamelCase = '''''' if isinstance(__a , (int, float)) else '''-''' _UpperCamelCase = F'''{key}{sep}{v}''' name.append(__a) return "_".join(__a) @classmethod def UpperCAmelCase ( cls , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = repr[len(cls.PREFIX) + 1 :] if repr == "": _UpperCamelCase = [] else: _UpperCamelCase = repr.split('''_''') _UpperCamelCase = {} for value in values: if "-" in value: _UpperCamelCase , _UpperCamelCase = value.split('''-''') else: _UpperCamelCase = re.sub('''[0-9.]''' , '''''' , __a) _UpperCamelCase = float(re.sub('''[^0-9.]''' , '''''' , __a)) _UpperCamelCase = cls.NAMING_INFO['''reverse_short_param'''][p_k] 
_UpperCamelCase = p_v for k in cls.DEFAULTS: if k not in parameters: _UpperCamelCase = cls.DEFAULTS[k] return parameters
78
0
"""simple docstring""" def lowerCamelCase__ ( __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = len(__snake_case ) for i in range(length - 1 ): _UpperCamelCase = i for k in range(i + 1, __snake_case ): if collection[k] < collection[least]: _UpperCamelCase = k if least != i: _UpperCamelCase , _UpperCamelCase = (collection[i], collection[least]) return collection if __name__ == "__main__": _a = input("""Enter numbers separated by a comma:\n""").strip() _a = [int(item) for item in user_input.split(""",""")] print(selection_sort(unsorted))
721
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) ) _UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) ) _UpperCamelCase = 0.01 with locka.acquire(): with pytest.raises(__snake_case ): _UpperCamelCase = time.time() locka.acquire(__snake_case ) assert time.time() - _start > timeout def lowerCamelCase__ ( __snake_case ) -> List[Any]: """simple docstring""" _UpperCamelCase = '''a''' * 10_00 + '''.lock''' _UpperCamelCase = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(__snake_case ) assert len(os.path.basename(locka._lock_file ) ) <= 2_55 _UpperCamelCase = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__snake_case ): locka.acquire(0 )
78
0
"""simple docstring""" from math import sqrt def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' must been an int and positive" _UpperCamelCase = True # 0 and 1 are none primes. if number <= 1: _UpperCamelCase = False for divisor in range(2, int(round(sqrt(__snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: _UpperCamelCase = False break # precondition assert isinstance(__snake_case, __snake_case ), "'status' must been from type bool" return status def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N _UpperCamelCase = list(range(2, n + 1 ) ) _UpperCamelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(__snake_case ) ): for j in range(i + 1, len(__snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): _UpperCamelCase = 0 # filters actual prime numbers. _UpperCamelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2" _UpperCamelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1 ): if is_prime(__snake_case ): ans.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and number >= 0, "'number' must been an int and >= 0" _UpperCamelCase = [] # this list will be returns of the function. # potential prime number factors. 
_UpperCamelCase = 2 _UpperCamelCase = number if number == 0 or number == 1: ans.append(__snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(__snake_case ): while quotient != 1: if is_prime(__snake_case ) and (quotient % factor == 0): ans.append(__snake_case ) quotient /= factor else: factor += 1 else: ans.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase = 0 # prime factorization of 'number' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = max(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int" return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase = 0 # prime factorization of 'number' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = min(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int" return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0, __snake_case ), "compare bust been from type bool" return number % 2 == 0 def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0, __snake_case ), "compare bust been from type bool" return number % 2 != 0 def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and (number > 2) and is_even(__snake_case ) ), "'number' must been an int, even and > 2" _UpperCamelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' _UpperCamelCase = get_prime_numbers(__snake_case ) _UpperCamelCase = len(__snake_case ) # run variable for while-loops. _UpperCamelCase = 0 _UpperCamelCase = None # exit variable. for break up the loops _UpperCamelCase = True while i < len_pn and loop: _UpperCamelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: _UpperCamelCase = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(__snake_case, __snake_case ) and (len(__snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def lowerCamelCase__ ( __snake_case, __snake_case ): """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
_UpperCamelCase = 0 while numbera != 0: _UpperCamelCase = numbera % numbera _UpperCamelCase = numbera _UpperCamelCase = rest # precondition assert isinstance(__snake_case, __snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCamelCase__ ( __snake_case, __snake_case ): """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." _UpperCamelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = prime_factorization(__snake_case ) elif numbera == 1 or numbera == 1: _UpperCamelCase = [] _UpperCamelCase = [] _UpperCamelCase = max(__snake_case, __snake_case ) _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: _UpperCamelCase = prime_fac_a.count(__snake_case ) _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(max(__snake_case, __snake_case ) ): ans *= n else: _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'number' must been a positive int" _UpperCamelCase = 0 _UpperCamelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(__snake_case ): ans += 1 # precondition assert isinstance(__snake_case, __snake_case ) and is_prime( __snake_case ), "'ans' must been a prime number and from type int" return ans def lowerCamelCase__ ( __snake_case, __snake_case ): """simple docstring""" assert ( is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" _UpperCamelCase = p_number_a + 1 # jump to the next number _UpperCamelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(__snake_case ): number += 1 while number < p_number_a: ans.append(__snake_case ) number += 1 # fetch the next prime number. while not is_prime(__snake_case ): number += 1 # precondition assert ( isinstance(__snake_case, __snake_case ) and ans[0] != p_number_a and ans[len(__snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 1), "'n' must been int and >= 1" _UpperCamelCase = [] # will be returned. 
for divisor in range(1, n + 1 ): if n % divisor == 0: ans.append(__snake_case ) # precondition assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" _UpperCamelCase = get_divisors(__snake_case ) # precondition assert ( isinstance(__snake_case, __snake_case ) and (divisors[0] == 1) and (divisors[len(__snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCamelCase__ ( __snake_case, __snake_case ): """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. _UpperCamelCase = gcd(abs(__snake_case ), abs(__snake_case ) ) # precondition assert ( isinstance(__snake_case, __snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been a int and >= 0" _UpperCamelCase = 1 # this will be return. for factor in range(1, n + 1 ): ans *= factor return ans def lowerCamelCase__ ( __snake_case ): """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been an int and >= 0" _UpperCamelCase = 0 _UpperCamelCase = 1 _UpperCamelCase = 1 # this will be return for _ in range(n - 1 ): _UpperCamelCase = ans ans += fiba _UpperCamelCase = tmp return ans
700
"""simple docstring""" from math import sqrt def lowerCamelCase__ ( __snake_case ) -> bool: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' must been an int and positive" _UpperCamelCase = True # 0 and 1 are none primes. if number <= 1: _UpperCamelCase = False for divisor in range(2, int(round(sqrt(__snake_case ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: _UpperCamelCase = False break # precondition assert isinstance(__snake_case, __snake_case ), "'status' must been from type bool" return status def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N _UpperCamelCase = list(range(2, n + 1 ) ) _UpperCamelCase = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(__snake_case ) ): for j in range(i + 1, len(__snake_case ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): _UpperCamelCase = 0 # filters actual prime numbers. _UpperCamelCase = [x for x in begin_list if x != 0] # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ) -> List[str]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n > 2), "'N' must been an int and > 2" _UpperCamelCase = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2, n + 1 ): if is_prime(__snake_case ): ans.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and number >= 0, "'number' must been an int and >= 0" _UpperCamelCase = [] # this list will be returns of the function. # potential prime number factors. 
_UpperCamelCase = 2 _UpperCamelCase = number if number == 0 or number == 1: ans.append(__snake_case ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(__snake_case ): while quotient != 1: if is_prime(__snake_case ) and (quotient % factor == 0): ans.append(__snake_case ) quotient /= factor else: factor += 1 else: ans.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type list" return ans def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase = 0 # prime factorization of 'number' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = max(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int" return ans def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number >= 0 ), "'number' bust been an int and >= 0" _UpperCamelCase = 0 # prime factorization of 'number' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = min(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ), "'ans' must been from type int" return ans def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ), "'number' must been an int" assert isinstance(number % 2 == 0, __snake_case ), "compare bust been from type bool" return number % 2 == 0 def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ), "'number' must been an int" assert isinstance(number % 2 != 0, __snake_case ), "compare bust been from type bool" return number % 2 != 0 def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and (number > 2) and is_even(__snake_case ) ), "'number' must been an int, even and > 2" _UpperCamelCase = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' _UpperCamelCase = get_prime_numbers(__snake_case ) _UpperCamelCase = len(__snake_case ) # run variable for while-loops. _UpperCamelCase = 0 _UpperCamelCase = None # exit variable. for break up the loops _UpperCamelCase = True while i < len_pn and loop: _UpperCamelCase = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: _UpperCamelCase = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(__snake_case, __snake_case ) and (len(__snake_case ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. And sum of elements must been eq 'number'" return ans def lowerCamelCase__ ( __snake_case, __snake_case ) -> str: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." 
_UpperCamelCase = 0 while numbera != 0: _UpperCamelCase = numbera % numbera _UpperCamelCase = numbera _UpperCamelCase = rest # precondition assert isinstance(__snake_case, __snake_case ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." _UpperCamelCase = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' _UpperCamelCase = prime_factorization(__snake_case ) _UpperCamelCase = prime_factorization(__snake_case ) elif numbera == 1 or numbera == 1: _UpperCamelCase = [] _UpperCamelCase = [] _UpperCamelCase = max(__snake_case, __snake_case ) _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: _UpperCamelCase = prime_fac_a.count(__snake_case ) _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(max(__snake_case, __snake_case ) ): ans *= n else: _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: _UpperCamelCase = prime_fac_a.count(__snake_case ) for _ in range(__snake_case ): ans *= n done.append(__snake_case ) # precondition assert isinstance(__snake_case, __snake_case ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCamelCase__ ( __snake_case ) -> Tuple: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'number' must been a positive int" _UpperCamelCase = 0 _UpperCamelCase = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(__snake_case ): ans += 1 # precondition assert isinstance(__snake_case, __snake_case ) and is_prime( __snake_case ), "'ans' must been a prime number and from type int" return ans def lowerCamelCase__ ( __snake_case, __snake_case ) -> Tuple: """simple docstring""" assert ( is_prime(__snake_case ) and is_prime(__snake_case ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" _UpperCamelCase = p_number_a + 1 # jump to the next number _UpperCamelCase = [] # this list will be returns. # if number is not prime then # fetch the next prime number. while not is_prime(__snake_case ): number += 1 while number < p_number_a: ans.append(__snake_case ) number += 1 # fetch the next prime number. while not is_prime(__snake_case ): number += 1 # precondition assert ( isinstance(__snake_case, __snake_case ) and ans[0] != p_number_a and ans[len(__snake_case ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCamelCase__ ( __snake_case ) -> List[Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 1), "'n' must been int and >= 1" _UpperCamelCase = [] # will be returned. 
for divisor in range(1, n + 1 ): if n % divisor == 0: ans.append(__snake_case ) # precondition assert ans[0] == 1 and ans[len(__snake_case ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and ( number > 1 ), "'number' must been an int and >= 1" _UpperCamelCase = get_divisors(__snake_case ) # precondition assert ( isinstance(__snake_case, __snake_case ) and (divisors[0] == 1) and (divisors[len(__snake_case ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]: """simple docstring""" assert ( isinstance(__snake_case, __snake_case ) and isinstance(__snake_case, __snake_case ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. _UpperCamelCase = gcd(abs(__snake_case ), abs(__snake_case ) ) # precondition assert ( isinstance(__snake_case, __snake_case ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been a int and >= 0" _UpperCamelCase = 1 # this will be return. for factor in range(1, n + 1 ): ans *= factor return ans def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" assert isinstance(__snake_case, __snake_case ) and (n >= 0), "'n' must been an int and >= 0" _UpperCamelCase = 0 _UpperCamelCase = 1 _UpperCamelCase = 1 # this will be return for _ in range(n - 1 ): _UpperCamelCase = ans ans += fiba _UpperCamelCase = tmp return ans
78
0
"""simple docstring""" from pathlib import Path import fire def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int: """simple docstring""" _UpperCamelCase = Path(__snake_case ) _UpperCamelCase = Path(__snake_case ) dest_dir.mkdir(exist_ok=__snake_case ) for path in src_dir.iterdir(): _UpperCamelCase = [x.rstrip() for x in list(path.open().readlines() )][:n] _UpperCamelCase = dest_dir.joinpath(path.name ) print(__snake_case ) dest_path.open('''w''' ).write('''\n'''.join(__snake_case ) ) if __name__ == "__main__": fire.Fire(minify)
701
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings _a = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__ ) class _UpperCAmelCase( lowerCamelCase ): lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Whether to use SortishSampler or not.'} ) lowercase__ = field( default=lowerCamelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} ) lowercase__ = field( default=lowerCamelCase , metadata={ 'help': ( 'The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `max_length` value of the model configuration.' ) } , ) lowercase__ = field( default=lowerCamelCase , metadata={ 'help': ( 'The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ' 'to the `num_beams` value of the model configuration.' ) } , ) lowercase__ = field( default=lowerCamelCase , metadata={ 'help': 'Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.' } , ) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = super().to_dict() for k, v in d.items(): if isinstance(__a , __a): _UpperCamelCase = v.to_dict() return d
78
0
"""simple docstring""" import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" _UpperCamelCase = int(__snake_case ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = t // 36_00, (t // 60) % 60, t % 60 return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}''' def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case=3_00 ) -> Dict: """simple docstring""" return F''' <div> {prefix} <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress> {label} </div> ''' def lowerCamelCase__ ( __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += F''' <th>{i}</th>\n''' html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: _UpperCamelCase = F'''{elt:.6f}''' if isinstance(__snake_case, __snake_case ) else str(__snake_case ) html_code += F''' <td>{elt}</td>\n''' html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class _UpperCAmelCase: lowercase__ = 5 lowercase__ = 0.2 def __init__( self , __a , __a = None , __a = True , __a = None , __a = 3_00 , ) -> List[Any]: '''simple docstring''' _UpperCamelCase = total _UpperCamelCase = '''''' if prefix is None else prefix _UpperCamelCase = leave _UpperCamelCase = parent _UpperCamelCase = width _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None def UpperCAmelCase ( self , __a , __a = False , __a = None) -> int: '''simple docstring''' _UpperCamelCase = value if comment is not None: _UpperCamelCase = comment if self.last_value is None: _UpperCamelCase = _UpperCamelCase = time.time() _UpperCamelCase = _UpperCamelCase = value _UpperCamelCase = _UpperCamelCase = None _UpperCamelCase = self.warmup _UpperCamelCase = 1 self.update_bar(__a) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total): if self.first_calls > 0: self.first_calls -= 1 _UpperCamelCase = time.time() _UpperCamelCase = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: _UpperCamelCase = self.elapsed_time / (value - self.start_value) else: _UpperCamelCase = None if value >= self.total: _UpperCamelCase = self.total _UpperCamelCase = None if not self.leave: self.close() elif self.average_time_per_item is not None: _UpperCamelCase = self.average_time_per_item * (self.total - value) self.update_bar(__a) _UpperCamelCase = value _UpperCamelCase = current_time if self.average_time_per_item is None: _UpperCamelCase = 1 else: _UpperCamelCase = max(int(self.update_every / self.average_time_per_item) , 1) def UpperCAmelCase ( self , __a , __a=None) -> str: '''simple docstring''' _UpperCamelCase = ''' ''' * (len(str(self.total)) - len(str(__a))) + str(__a) if self.elapsed_time is None: _UpperCamelCase = F'''[{spaced_value}/{self.total} : < :''' elif self.predicted_remaining is None: _UpperCamelCase = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}''' else: _UpperCamelCase = ( F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <''' F''' {format_time(self.predicted_remaining)}''' ) self.label += F''', {1/self.average_time_per_item:.2f} it/s''' self.label += "]" if self.comment is None or len(self.comment) == 0 else F''', {self.comment}]''' self.display() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: _UpperCamelCase = disp.display(disp.HTML(self.html_code) , display_id=__a) else: self.output.update(disp.HTML(self.html_code)) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''')) class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a=None) -> Dict: '''simple docstring''' super().__init__(__a) _UpperCamelCase = None if column_names is None else [column_names] _UpperCamelCase = None def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: _UpperCamelCase = disp.display(disp.HTML(self.html_code) , display_id=__a) else: self.output.update(disp.HTML(self.html_code)) def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' if self.inner_table is None: _UpperCamelCase = [list(values.keys()), list(values.values())] else: _UpperCamelCase = self.inner_table[0] if len(self.inner_table) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(__a) _UpperCamelCase = columns self.inner_table.append([values[c] for c in columns]) def UpperCAmelCase ( self , __a , __a=None , __a=3_00) -> List[str]: '''simple docstring''' _UpperCamelCase = NotebookProgressBar(__a , prefix=__a , parent=self , width=__a) return self.child_bar def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = None self.display() class _UpperCAmelCase( lowerCamelCase ): def __init__( self) -> str: '''simple docstring''' _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = False def UpperCAmelCase ( self , __a , __a , __a , **__a) -> int: '''simple 
docstring''' _UpperCamelCase = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' _UpperCamelCase = 0 _UpperCamelCase = 0 _UpperCamelCase = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''') _UpperCamelCase = NotebookTrainingTracker(state.max_steps , __a) def UpperCAmelCase ( self , __a , __a , __a , **__a) -> int: '''simple docstring''' _UpperCamelCase = int(state.epoch) if int(state.epoch) == state.epoch else F'''{state.epoch:.2f}''' self.training_tracker.update( state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , ) _UpperCamelCase = False def UpperCAmelCase ( self , __a , __a , __a , __a=None , **__a) -> List[str]: '''simple docstring''' if not has_length(__a): return if self.prediction_bar is None: if self.training_tracker is not None: _UpperCamelCase = self.training_tracker.add_child(len(__a)) else: _UpperCamelCase = NotebookProgressBar(len(__a)) self.prediction_bar.update(1) else: self.prediction_bar.update(self.prediction_bar.value + 1) def UpperCAmelCase ( self , __a , __a , __a , **__a) -> int: '''simple docstring''' if self.prediction_bar is not None: self.prediction_bar.close() _UpperCamelCase = None def UpperCAmelCase ( self , __a , __a , __a , __a=None , **__a) -> Dict: '''simple docstring''' # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: _UpperCamelCase = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy _UpperCamelCase = state.global_step self.training_tracker.write_line(__a) def UpperCAmelCase ( self , __a , __a , __a , __a=None , **__a) -> Tuple: '''simple docstring''' if self.training_tracker is not None: _UpperCamelCase = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history): if "loss" in log: _UpperCamelCase = log['''loss'''] break if self.first_column == "Epoch": _UpperCamelCase = int(state.epoch) else: _UpperCamelCase = state.global_step _UpperCamelCase = '''eval''' for k in metrics: if k.endswith('''_loss'''): _UpperCamelCase = re.sub(R'''\_loss$''' , '''''' , __a) _UpperCamelCase = metrics.pop('''total_flos''' , __a) _UpperCamelCase = metrics.pop('''epoch''' , __a) _UpperCamelCase = metrics.pop(F'''{metric_key_prefix}_runtime''' , __a) _UpperCamelCase = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , __a) _UpperCamelCase = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , __a) _UpperCamelCase = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , __a) for k, v in metrics.items(): if k == F'''{metric_key_prefix}_loss''': _UpperCamelCase = v else: _UpperCamelCase = k.split('''_''') _UpperCamelCase = ''' '''.join([part.capitalize() for part in splits[1:]]) _UpperCamelCase = v self.training_tracker.write_line(__a) self.training_tracker.remove_child() _UpperCamelCase = None # Evaluation takes a long time so we should force the next update. _UpperCamelCase = True def UpperCAmelCase ( self , __a , __a , __a , **__a) -> Dict: '''simple docstring''' self.training_tracker.update( state.global_step , comment=F'''Epoch {int(state.epoch)}/{state.num_train_epochs}''' , force_update=__a) _UpperCamelCase = None
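The callback sample above throttles how often the HTML bar is re-rendered: it tracks an average time per item and turns elapsed seconds into an h:mm:ss label. A minimal standalone sketch of those two pieces, using illustrative names rather than the sample's masked identifiers:

# Minimal sketch (illustrative names, not the masked identifiers used above) of the
# two helpers the notebook callback relies on: h:mm:ss formatting and refresh throttling.
def format_time(seconds: int) -> str:
    # "h:mm:ss" once at least one hour has elapsed, otherwise "mm:ss"
    hours, minutes, secs = seconds // 3_600, (seconds // 60) % 60, seconds % 60
    return f"{hours}:{minutes:02d}:{secs:02d}" if hours != 0 else f"{minutes:02d}:{secs:02d}"


def steps_between_updates(update_every_seconds: float, average_time_per_item: float) -> int:
    # Re-render roughly every `update_every_seconds` of wall-clock time,
    # but never less often than once per processed item.
    return max(int(update_every_seconds / average_time_per_item), 1)


print(format_time(3_725))                # 1:02:05
print(steps_between_updates(0.2, 0.05))  # 4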
702
"""simple docstring""" import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) _a = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _UpperCamelCase = k.replace(__snake_case, __snake_case ) if k.startswith('''encoder''' ): _UpperCamelCase = k.replace('''.attn''', '''.self_attn''' ) _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''final_layer_norm''' ) elif k.startswith('''decoder''' ): _UpperCamelCase = k.replace('''norm1''', '''self_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm2''', '''encoder_attn_layer_norm''' ) _UpperCamelCase = k.replace('''norm3''', '''final_layer_norm''' ) return k def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = [ '''model.encoder.layernorm_embedding.weight''', '''model.encoder.layernorm_embedding.bias''', '''model.decoder.layernorm_embedding.weight''', '''model.decoder.layernorm_embedding.bias''', ] for k in keys: _UpperCamelCase = sd.pop(__snake_case ) _UpperCamelCase = k.replace('''layernorm_embedding''', '''layer_norm''' ) assert new_k not in sd _UpperCamelCase = v _a = ["""START"""] @torch.no_grad() def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> int: """simple docstring""" _UpperCamelCase = torch.load(__snake_case, map_location='''cpu''' ) _UpperCamelCase = model['''model'''] _UpperCamelCase = BlenderbotConfig.from_json_file(__snake_case ) _UpperCamelCase = BlenderbotForConditionalGeneration(__snake_case ) _UpperCamelCase = m.model.state_dict().keys() _UpperCamelCase = [] _UpperCamelCase = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _UpperCamelCase = rename_state_dict_key(__snake_case ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _UpperCamelCase = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(__snake_case ) m.model.load_state_dict(__snake_case, strict=__snake_case ) m.half() m.save_pretrained(__snake_case ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) _a = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
78
0
"""simple docstring""" def lowerCamelCase__ ( __snake_case = 1_00_00_00 ) -> int: """simple docstring""" _UpperCamelCase = limit + 1 _UpperCamelCase = [0] * limit for first_term in range(1, __snake_case ): for n in range(__snake_case, __snake_case, __snake_case ): _UpperCamelCase = first_term + n / first_term if common_difference % 4: # d must be divisble by 4 continue else: common_difference /= 4 if ( first_term > common_difference and first_term < 4 * common_difference ): # since x,y,z are positive integers frequency[n] += 1 # so z>0 and a>d ,also 4d<a _UpperCamelCase = sum(1 for x in frequency[1:limit] if x == 10 ) return count if __name__ == "__main__": print(F"""{solution() = }""")
703
"""simple docstring""" import argparse import os.path as osp import re import torch from safetensors.torch import load_file, save_file # =================# # UNet Conversion # # =================# _a = [ # (stable-diffusion, HF Diffusers) ("""time_embed.0.weight""", """time_embedding.linear_1.weight"""), ("""time_embed.0.bias""", """time_embedding.linear_1.bias"""), ("""time_embed.2.weight""", """time_embedding.linear_2.weight"""), ("""time_embed.2.bias""", """time_embedding.linear_2.bias"""), ("""input_blocks.0.0.weight""", """conv_in.weight"""), ("""input_blocks.0.0.bias""", """conv_in.bias"""), ("""out.0.weight""", """conv_norm_out.weight"""), ("""out.0.bias""", """conv_norm_out.bias"""), ("""out.2.weight""", """conv_out.weight"""), ("""out.2.bias""", """conv_out.bias"""), ] _a = [ # (stable-diffusion, HF Diffusers) ("""in_layers.0""", """norm1"""), ("""in_layers.2""", """conv1"""), ("""out_layers.0""", """norm2"""), ("""out_layers.3""", """conv2"""), ("""emb_layers.1""", """time_emb_proj"""), ("""skip_connection""", """conv_shortcut"""), ] _a = [] # hardcoded number of downblocks and resnets/attentions... # would need smarter logic for other networks. for i in range(4): # loop over downblocks/upblocks for j in range(2): # loop over resnets/attentions for downblocks _a = F"""down_blocks.{i}.resnets.{j}.""" _a = F"""input_blocks.{3*i + j + 1}.0.""" unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix)) if i < 3: # no attention layers in down_blocks.3 _a = F"""down_blocks.{i}.attentions.{j}.""" _a = F"""input_blocks.{3*i + j + 1}.1.""" unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix)) for j in range(3): # loop over resnets/attentions for upblocks _a = F"""up_blocks.{i}.resnets.{j}.""" _a = F"""output_blocks.{3*i + j}.0.""" unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix)) if i > 0: # no attention layers in up_blocks.0 _a = F"""up_blocks.{i}.attentions.{j}.""" _a = F"""output_blocks.{3*i + j}.1.""" unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix)) if i < 3: # no downsample in down_blocks.3 _a = F"""down_blocks.{i}.downsamplers.0.conv.""" _a = F"""input_blocks.{3*(i+1)}.0.op.""" unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix)) # no upsample in up_blocks.3 _a = F"""up_blocks.{i}.upsamplers.0.""" _a = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}.""" unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix)) _a = """mid_block.attentions.0.""" _a = """middle_block.1.""" unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix)) for j in range(2): _a = F"""mid_block.resnets.{j}.""" _a = F"""middle_block.{2*j}.""" unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix)) def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" _UpperCamelCase = {k: k for k in unet_state_dict.keys()} for sd_name, hf_name in unet_conversion_map: _UpperCamelCase = sd_name for k, v in mapping.items(): if "resnets" in k: for sd_part, hf_part in unet_conversion_map_resnet: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v for k, v in mapping.items(): for sd_part, hf_part in unet_conversion_map_layer: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v _UpperCamelCase = {v: unet_state_dict[k] for k, v in mapping.items()} return new_state_dict # ================# # VAE Conversion # # ================# _a = [ # (stable-diffusion, HF Diffusers) ("""nin_shortcut""", """conv_shortcut"""), 
("""norm_out""", """conv_norm_out"""), ("""mid.attn_1.""", """mid_block.attentions.0."""), ] for i in range(4): # down_blocks have two resnets for j in range(2): _a = F"""encoder.down_blocks.{i}.resnets.{j}.""" _a = F"""encoder.down.{i}.block.{j}.""" vae_conversion_map.append((sd_down_prefix, hf_down_prefix)) if i < 3: _a = F"""down_blocks.{i}.downsamplers.0.""" _a = F"""down.{i}.downsample.""" vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix)) _a = F"""up_blocks.{i}.upsamplers.0.""" _a = F"""up.{3-i}.upsample.""" vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix)) # up_blocks have three resnets # also, up blocks in hf are numbered in reverse from sd for j in range(3): _a = F"""decoder.up_blocks.{i}.resnets.{j}.""" _a = F"""decoder.up.{3-i}.block.{j}.""" vae_conversion_map.append((sd_up_prefix, hf_up_prefix)) # this part accounts for mid blocks in both the encoder and the decoder for i in range(2): _a = F"""mid_block.resnets.{i}.""" _a = F"""mid.block_{i+1}.""" vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix)) _a = [ # (stable-diffusion, HF Diffusers) ("""norm.""", """group_norm."""), ("""q.""", """query."""), ("""k.""", """key."""), ("""v.""", """value."""), ("""proj_out.""", """proj_attn."""), ] def lowerCamelCase__ ( __snake_case ) -> List[str]: """simple docstring""" return w.reshape(*w.shape, 1, 1 ) def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = {k: k for k in vae_state_dict.keys()} for k, v in mapping.items(): for sd_part, hf_part in vae_conversion_map: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v for k, v in mapping.items(): if "attentions" in k: for sd_part, hf_part in vae_conversion_map_attn: _UpperCamelCase = v.replace(__snake_case, __snake_case ) _UpperCamelCase = v _UpperCamelCase = {v: vae_state_dict[k] for k, v in mapping.items()} _UpperCamelCase = ['''q''', '''k''', '''v''', '''proj_out'''] for k, v in new_state_dict.items(): for weight_name in weights_to_convert: if F'''mid.attn_1.{weight_name}.weight''' in k: print(F'''Reshaping {k} for SD format''' ) _UpperCamelCase = reshape_weight_for_sd(__snake_case ) return new_state_dict # =========================# # Text Encoder Conversion # # =========================# _a = [ # (stable-diffusion, HF Diffusers) ("""resblocks.""", """text_model.encoder.layers."""), ("""ln_1""", """layer_norm1"""), ("""ln_2""", """layer_norm2"""), (""".c_fc.""", """.fc1."""), (""".c_proj.""", """.fc2."""), (""".attn""", """.self_attn"""), ("""ln_final.""", """transformer.text_model.final_layer_norm."""), ("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""), ("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""), ] _a = {re.escape(x[1]): x[0] for x in textenc_conversion_lst} _a = re.compile("""|""".join(protected.keys())) # Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp _a = {"""q""": 0, """k""": 1, """v""": 2} def lowerCamelCase__ ( __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = {} _UpperCamelCase = {} for k, v in text_enc_dict.items(): if ( k.endswith('''.self_attn.q_proj.weight''' ) or k.endswith('''.self_attn.k_proj.weight''' ) or k.endswith('''.self_attn.v_proj.weight''' ) ): _UpperCamelCase = k[: -len('''.q_proj.weight''' )] _UpperCamelCase = k[-len('''q_proj.weight''' )] if k_pre not in capture_qkv_weight: _UpperCamelCase = [None, None, None] 
_UpperCamelCase = v continue if ( k.endswith('''.self_attn.q_proj.bias''' ) or k.endswith('''.self_attn.k_proj.bias''' ) or k.endswith('''.self_attn.v_proj.bias''' ) ): _UpperCamelCase = k[: -len('''.q_proj.bias''' )] _UpperCamelCase = k[-len('''q_proj.bias''' )] if k_pre not in capture_qkv_bias: _UpperCamelCase = [None, None, None] _UpperCamelCase = v continue _UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case ) _UpperCamelCase = v for k_pre, tensors in capture_qkv_weight.items(): if None in tensors: raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' ) _UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case ) _UpperCamelCase = torch.cat(__snake_case ) for k_pre, tensors in capture_qkv_bias.items(): if None in tensors: raise Exception('''CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing''' ) _UpperCamelCase = textenc_pattern.sub(lambda __snake_case : protected[re.escape(m.group(0 ) )], __snake_case ) _UpperCamelCase = torch.cat(__snake_case ) return new_state_dict def lowerCamelCase__ ( __snake_case ) -> Tuple: """simple docstring""" return text_enc_dict if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt.""" ) _a = parser.parse_args() assert args.model_path is not None, "Must provide a model path!" assert args.checkpoint_path is not None, "Must provide a checkpoint path!" 
# Path for safetensors _a = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""") _a = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""") _a = osp.join(args.model_path, """text_encoder""", """model.safetensors""") # Load models from safetensors if it exists, if it doesn't pytorch if osp.exists(unet_path): _a = load_file(unet_path, device="""cpu""") else: _a = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""") _a = torch.load(unet_path, map_location="""cpu""") if osp.exists(vae_path): _a = load_file(vae_path, device="""cpu""") else: _a = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""") _a = torch.load(vae_path, map_location="""cpu""") if osp.exists(text_enc_path): _a = load_file(text_enc_path, device="""cpu""") else: _a = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""") _a = torch.load(text_enc_path, map_location="""cpu""") # Convert the UNet model _a = convert_unet_state_dict(unet_state_dict) _a = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()} # Convert the VAE model _a = convert_vae_state_dict(vae_state_dict) _a = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()} # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper _a = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict if is_vaa_model: # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm _a = {"""transformer.""" + k: v for k, v in text_enc_dict.items()} _a = convert_text_enc_state_dict_vaa(text_enc_dict) _a = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()} else: _a = convert_text_enc_state_dict(text_enc_dict) _a = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()} # Put together new checkpoint _a = {**unet_state_dict, **vae_state_dict, **text_enc_dict} if args.half: _a = {k: v.half() for k, v in state_dict.items()} if args.use_safetensors: save_file(state_dict, args.checkpoint_path) else: _a = {"""state_dict""": state_dict} torch.save(state_dict, args.checkpoint_path)
78
0
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" def is_in_circle(__snake_case, __snake_case ) -> bool: _UpperCamelCase = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle _UpperCamelCase = mean( int(is_in_circle(uniform(-1.0, 1.0 ), uniform(-1.0, 1.0 ) ) ) for _ in range(__snake_case ) ) # The ratio of the area for circle to square is pi/4. _UpperCamelCase = proportion * 4 print(F'''The estimated value of pi is {pi_estimate}''' ) print(F'''The numpy value of pi is {pi}''' ) print(F'''The total error is {abs(pi - pi_estimate )}''' ) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case = 0.0, __snake_case = 1.0, ) -> float: """simple docstring""" return mean( function_to_integrate(uniform(__snake_case, __snake_case ) ) for _ in range(__snake_case ) ) * (max_value - min_value) def lowerCamelCase__ ( __snake_case, __snake_case = 0.0, __snake_case = 1.0 ) -> None: """simple docstring""" def identity_function(__snake_case ) -> float: return x _UpperCamelCase = area_under_curve_estimator( __snake_case, __snake_case, __snake_case, __snake_case ) _UpperCamelCase = (max_value * max_value - min_value * min_value) / 2 print('''******************''' ) print(F'''Estimating area under y=x where x varies from {min_value} to {max_value}''' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {expected_value}''' ) print(F'''Total error is {abs(estimated_value - expected_value )}''' ) print('''******************''' ) def lowerCamelCase__ ( __snake_case ) -> None: """simple docstring""" def function_to_integrate(__snake_case ) -> float: return sqrt(4.0 - x * x ) _UpperCamelCase = area_under_curve_estimator( __snake_case, __snake_case, 0.0, 2.0 ) print('''******************''' ) print('''Estimating pi using area_under_curve_estimator''' ) print(F'''Estimated value is {estimated_value}''' ) print(F'''Expected value is {pi}''' ) print(F'''Total error is {abs(estimated_value - pi )}''' ) print('''******************''' ) if __name__ == "__main__": import doctest doctest.testmod()
704
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" if openai_config_file == "": _UpperCamelCase = OpenAIGPTConfig() else: _UpperCamelCase = OpenAIGPTConfig.from_json_file(__snake_case ) _UpperCamelCase = OpenAIGPTModel(__snake_case ) # Load weights from numpy load_tf_weights_in_openai_gpt(__snake_case, __snake_case, __snake_case ) # Save pytorch-model _UpperCamelCase = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME _UpperCamelCase = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' ) torch.save(model.state_dict(), __snake_case ) print(F'''Save configuration file to {pytorch_config_dump_path}''' ) with open(__snake_case, '''w''', encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) _a = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
78
0
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets _a = """\ @inproceedings{snover-etal-2006-study, title = \"A Study of Translation Edit Rate with Targeted Human Annotation\", author = \"Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John\", booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\", month = aug # \" 8-12\", year = \"2006\", address = \"Cambridge, Massachusetts, USA\", publisher = \"Association for Machine Translation in the Americas\", url = \"https://aclanthology.org/2006.amta-papers.25\", pages = \"223--231\", } @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ _a = """\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. """ _a = """ Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: 'score' (float): TER score (num_edits / sum_ref_lengths * 100) 'num_edits' (int): The cumulative number of edits 'ref_length' (float): The cumulative average reference length Examples: Example 1: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\", ... \"What did the TER metric user say to the developer?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"], ... [\"Your jokes are...\", \"...TERrible\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... 
case_sensitive=True) >>> print(results) {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0} Example 2: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0} Example 3: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5} Example 4: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0} Example 5: >>> predictions = [\"does this sentence match??\", ... \"what about this sentence?\", ... \"What did the TER metric user say to the developer?\"] >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"], ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"], ... [\"Your jokes are...\", \"...TERrible\"]] >>> ter = datasets.load_metric(\"ter\") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _UpperCAmelCase( datasets.Metric ): def UpperCAmelCase ( self) -> int: '''simple docstring''' if version.parse(scb.__version__) < version.parse('''1.4.12'''): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''') return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence'''), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''') , id='''references'''), }) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[ '''https://github.com/jhclark/tercom''', ] , ) def UpperCAmelCase ( self , __a , __a , __a = False , __a = False , __a = False , __a = False , ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = len(references[0]) if any(len(__a) != references_per_prediction for refs in references): raise ValueError('''Sacrebleu requires the same number of references for each prediction''') _UpperCamelCase = [[refs[i] for refs in references] for i in range(__a)] _UpperCamelCase = TER( normalized=__a , no_punct=__a , asian_support=__a , case_sensitive=__a , ) _UpperCamelCase = sb_ter.corpus_score(__a , __a) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
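The score definition quoted in the docstring (num_edits / ref_length * 100) can be checked directly against the numbers reported in Example 1:

# Arithmetic check of the TER score definition from the docstring above,
# using the values reported in Example 1.
num_edits, ref_length = 15, 10.0
print(num_edits / ref_length * 100)  # 150.0, matching the reported 'score'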
705
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class _UpperCAmelCase: lowercase__ = MBartConfig lowercase__ = {} lowercase__ = 'gelu' def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> Any: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = eos_token_id _UpperCamelCase = pad_token_id _UpperCamelCase = bos_token_id def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) _UpperCamelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) _UpperCamelCase = tf.concat([input_ids, eos_tensor] , axis=1) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCamelCase = prepare_mbart_inputs_dict(__a , __a , __a) return config, inputs_dict def UpperCAmelCase ( self , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = TFMBartModel(config=__a).get_decoder() _UpperCamelCase = inputs_dict['''input_ids'''] _UpperCamelCase = input_ids[:1, :] _UpperCamelCase = inputs_dict['''attention_mask'''][:1, :] _UpperCamelCase = inputs_dict['''head_mask'''] _UpperCamelCase = 1 # first forward pass _UpperCamelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a) _UpperCamelCase , _UpperCamelCase = outputs.to_tuple() _UpperCamelCase = past_key_values[1] def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, __snake_case=None, ) -> Optional[int]: """simple docstring""" if attention_mask is None: _UpperCamelCase = tf.cast(tf.math.not_equal(__snake_case, config.pad_token_id ), tf.inta ) if decoder_attention_mask is None: _UpperCamelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta ), 
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ), tf.inta ), ], axis=-1, ) if head_mask is None: _UpperCamelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCamelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () lowercase__ = (TFMBartForConditionalGeneration,) if is_tf_available() else () lowercase__ = ( { 'conversational': TFMBartForConditionalGeneration, 'feature-extraction': TFMBartModel, 'summarization': TFMBartForConditionalGeneration, 'text2text-generation': TFMBartForConditionalGeneration, 'translation': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) lowercase__ = True lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Dict: '''simple docstring''' if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' return True return False def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = TFMBartModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a) def UpperCAmelCase ( self) -> str: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a) @require_sentencepiece @require_tokenizers @require_tf class _UpperCAmelCase( unittest.TestCase ): lowercase__ = [ ' UN Chief Says There Is No Military Solution in Syria', ] lowercase__ = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', ] lowercase__ = 'facebook/mbart-large-en-ro' @cached_property def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def UpperCAmelCase ( self , **__a) -> List[str]: '''simple docstring''' _UpperCamelCase = self.translate_src_text(**__a) self.assertListEqual(self.expected_text , __a) def UpperCAmelCase ( self , **__a) -> Dict: '''simple docstring''' _UpperCamelCase = self.tokenizer(self.src_text , **__a , return_tensors='''tf''') _UpperCamelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2) _UpperCamelCase = self.tokenizer.batch_decode(__a , skip_special_tokens=__a) return generated_words @slow def UpperCAmelCase ( self) -> Any: '''simple docstring''' self._assert_generated_batch_equal_expected()
78
0
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=2 , __a=24 , __a=16 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=None , __a=2 , __a=2 , ) -> List[str]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = patch_size _UpperCamelCase = max_length _UpperCamelCase = num_mel_bins _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope _UpperCamelCase = frequency_stride _UpperCamelCase = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1 _UpperCamelCase = frequency_out_dimension * time_out_dimension _UpperCamelCase = num_patches + 2 def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = self.get_config() return config, input_values, labels def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ASTModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( 
_UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_values''': input_values} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowercase__ = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = ASTModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37) def UpperCAmelCase ( self) -> int: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) _UpperCamelCase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''input_values'''] self.assertListEqual(arg_names[:1] , __a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = ASTModel.from_pretrained(__a) self.assertIsNotNone(__a) def lowerCamelCase__ ( ) -> List[str]: """simple docstring""" _UpperCamelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' ) _UpperCamelCase , _UpperCamelCase = torchaudio.load(__snake_case ) return audio, sampling_rate @require_torch @require_torchaudio class _UpperCAmelCase( unittest.TestCase ): @cached_property def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''') if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.default_feature_extractor _UpperCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(__a) _UpperCamelCase = self.default_feature_extractor _UpperCamelCase , _UpperCamelCase = prepare_audio() _UpperCamelCase = audio.squeeze().numpy() _UpperCamelCase = feature_extractor(__a , sampling_rate=__a , return_tensors='''pt''').to(__a) # forward 
pass with torch.no_grad(): _UpperCamelCase = model(**__a) # verify the logits _UpperCamelCase = torch.Size((1, 5_27)) self.assertEqual(outputs.logits.shape , __a) _UpperCamelCase = torch.tensor([-0.8760, -7.0042, -8.6602]).to(__a) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
706
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging _a = logging.get_logger(__name__) class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ['pixel_values'] def __init__( self , __a = True , __a = 1 / 2_55 , __a = True , __a = 8 , **__a , ) -> None: '''simple docstring''' super().__init__(**__a) _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_pad _UpperCamelCase = pad_size def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray: '''simple docstring''' return rescale(__a , scale=__a , data_format=__a , **__a) def UpperCAmelCase ( self , __a , __a , __a = None) -> List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = get_image_size(__a) _UpperCamelCase = (old_height // size + 1) * size - old_height _UpperCamelCase = (old_width // size + 1) * size - old_width return pad(__a , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=__a) def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> Tuple: '''simple docstring''' _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_pad if do_pad is not None else self.do_pad _UpperCamelCase = pad_size if pad_size is not None else self.pad_size _UpperCamelCase = make_list_of_images(__a) if not valid_images(__a): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') # All transformations expect numpy arrays. _UpperCamelCase = [to_numpy_array(__a) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images] if do_pad: _UpperCamelCase = [self.pad(__a , size=__a) for image in images] _UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=__a , tensor_type=__a)
78
0
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( '''split_dict''', [ SplitDict(), SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=13_37, num_examples=42, dataset_name='''my_dataset''' )} ), SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=13_37, num_examples=42 )} ), SplitDict({'''train''': SplitInfo()} ), ], ) def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = split_dict._to_yaml_list() assert len(__snake_case ) == len(__snake_case ) _UpperCamelCase = SplitDict._from_yaml_list(__snake_case ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump _UpperCamelCase = None # the split name of split_dict takes over the name of the split info object _UpperCamelCase = split_name assert split_dict == reloaded @pytest.mark.parametrize( '''split_info''', [SplitInfo(), SplitInfo(dataset_name=__snake_case ), SplitInfo(dataset_name='''my_dataset''' )] ) def lowerCamelCase__ ( __snake_case ) -> Tuple: """simple docstring""" _UpperCamelCase = asdict(SplitDict({'''train''': split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
707
"""simple docstring""" from importlib import import_module from .logging import get_logger _a = get_logger(__name__) class _UpperCAmelCase: def __init__( self , __a , __a=None) -> Dict: '''simple docstring''' _UpperCamelCase = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__'''): setattr(self , __a , getattr(__a , __a)) _UpperCamelCase = module._original_module if isinstance(__a , _PatchedModuleObj) else module class _UpperCAmelCase: lowercase__ = [] def __init__( self , __a , __a , __a , __a=None) -> List[str]: '''simple docstring''' _UpperCamelCase = obj _UpperCamelCase = target _UpperCamelCase = new _UpperCamelCase = target.split('''.''')[0] _UpperCamelCase = {} _UpperCamelCase = attrs or [] def __enter__( self) -> int: '''simple docstring''' *_UpperCamelCase , _UpperCamelCase = self.target.split('''.''') # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__a)): try: _UpperCamelCase = import_module('''.'''.join(submodules[: i + 1])) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): _UpperCamelCase = getattr(self.obj , __a) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__a , _PatchedModuleObj) and obj_attr._original_module is submodule) ): _UpperCamelCase = obj_attr # patch at top level setattr(self.obj , __a , _PatchedModuleObj(__a , attrs=self.attrs)) _UpperCamelCase = getattr(self.obj , __a) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__a , __a , _PatchedModuleObj(getattr(__a , __a , __a) , attrs=self.attrs)) _UpperCamelCase = getattr(__a , __a) # finally set the target attribute setattr(__a , __a , self.new) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: _UpperCamelCase = getattr(import_module('''.'''.join(__a)) , __a) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj , __a) is attr_value: _UpperCamelCase = getattr(self.obj , __a) setattr(self.obj , __a , self.new) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" _UpperCamelCase = globals()['''__builtins__'''][target_attr] setattr(self.obj , __a , self.new) else: raise RuntimeError(F'''Tried to patch attribute {target_attr} instead of a submodule.''') def __exit__( self , *__a) -> Tuple: '''simple docstring''' for attr in list(self.original): setattr(self.obj , __a , self.original.pop(__a)) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' self.__enter__() self._active_patches.append(self) def UpperCAmelCase ( self) -> str: '''simple docstring''' try: self._active_patches.remove(self) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
78
0
"""simple docstring""" import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) _a : Any = logging.getLogger() def lowerCamelCase__ ( ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''-f''' ) _UpperCamelCase = parser.parse_args() return args.f def lowerCamelCase__ ( __snake_case ) -> List[str]: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = os.path.join(__snake_case, '''all_results.json''' ) if os.path.exists(__snake_case ): with open(__snake_case, '''r''' ) as f: _UpperCamelCase = json.load(__snake_case ) else: raise ValueError(F'''can\'t find {path}''' ) return results def lowerCamelCase__ ( ) -> Tuple: """simple docstring""" _UpperCamelCase = torch.cuda.is_available() and torch_device == '''cuda''' return is_using_cuda and is_apex_available() _a : Union[str, Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class _UpperCAmelCase( lowerCamelCase ): @classmethod def UpperCAmelCase ( cls) -> Optional[int]: '''simple docstring''' _UpperCamelCase = tempfile.mkdtemp() _UpperCamelCase = os.path.join(cls.tmpdir , '''default_config.yml''') write_basic_config(save_location=cls.configPath) _UpperCamelCase = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath] @classmethod def UpperCAmelCase ( cls) -> int: '''simple docstring''' shutil.rmtree(cls.tmpdir) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''') run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75) self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''glue_no_trainer'''))) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertLess(result['''perplexity'''] , 1_00) self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''clm_no_trainer'''))) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertLess(result['''perplexity'''] , 42) self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''mlm_no_trainer'''))) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = 7 if get_gpu_count() > 1 else 2 _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75) self.assertLess(result['''train_loss'''] , 0.5) self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''ner_no_trainer'''))) @unittest.skip(reason='''Fix me @muellerzr''') @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result['''eval_f1'''] , 28) self.assertGreaterEqual(result['''eval_exact'''] , 28) self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''qa_no_trainer'''))) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8) self.assertTrue(os.path.exists(os.path.join(__a , '''swag_no_trainer'''))) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertGreaterEqual(result['''eval_rouge1'''] , 10) self.assertGreaterEqual(result['''eval_rouge2'''] , 2) self.assertGreaterEqual(result['''eval_rougeL'''] , 7) self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7) self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''summarization_no_trainer'''))) @slow @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertGreaterEqual(result['''eval_bleu'''] , 30) self.assertTrue(os.path.exists(os.path.join(__a , '''epoch_0'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''translation_no_trainer'''))) @slow def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = logging.StreamHandler(sys.stdout) logger.addHandler(__a) _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir 
{tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10) @mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''}) def UpperCAmelCase ( self) -> str: '''simple docstring''' _UpperCamelCase = self.get_auto_remove_tmp_dir() _UpperCamelCase = F''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('''--fp16''') run_command(self._launch_args + testargs) _UpperCamelCase = get_results(__a) # The base model scores a 25% self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6) self.assertTrue(os.path.exists(os.path.join(__a , '''step_1'''))) self.assertTrue(os.path.exists(os.path.join(__a , '''image_classification_no_trainer''')))
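Each test above reads its metrics back from the all_results.json file written by the example script, via the get_results helper near the top of the file. As a minimal, self-contained sketch of that round trip (the metric names and thresholds are illustrative, not taken from any real run):

import json
import os
import tempfile

# Minimal sketch of the all_results.json round trip the tests above rely on.
# Metric names and thresholds below are illustrative only.
with tempfile.TemporaryDirectory() as tmp_dir:
    results_path = os.path.join(tmp_dir, "all_results.json")
    with open(results_path, "w", encoding="utf-8") as f:
        json.dump({"eval_accuracy": 0.82, "train_loss": 0.31}, f)
    with open(results_path, "r", encoding="utf-8") as f:
        results = json.load(f)
    assert results["eval_accuracy"] >= 0.75
    assert results["train_loss"] < 0.5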
708
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
78
0
"""simple docstring""" from __future__ import annotations from math import gcd def lowerCamelCase__ ( __snake_case, __snake_case = 2, __snake_case = 1, __snake_case = 3, ) -> int | None: """simple docstring""" if num < 2: raise ValueError('''The input value cannot be less than 2''' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(__snake_case, __snake_case, __snake_case ) -> int: return (pow(__snake_case, 2 ) + step) % modulus for _ in range(__snake_case ): # These track the position within the cycle detection logic. _UpperCamelCase = seed _UpperCamelCase = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. _UpperCamelCase = rand_fn(__snake_case, __snake_case, __snake_case ) _UpperCamelCase = rand_fn(__snake_case, __snake_case, __snake_case ) _UpperCamelCase = rand_fn(__snake_case, __snake_case, __snake_case ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. _UpperCamelCase = gcd(hare - tortoise, __snake_case ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. _UpperCamelCase = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse _a = argparse.ArgumentParser() parser.add_argument( """num""", type=int, help="""The value to find a divisor of""", ) parser.add_argument( """--attempts""", type=int, default=3, help="""The number of attempts before giving up""", ) _a = parser.parse_args() _a = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(F"""{args.num} is probably prime""") else: _a = args.num // divisor print(F"""{args.num} = {divisor} * {quotient}""")
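The file above implements Pollard's rho with Floyd cycle detection. For readers who want to follow the algorithm without the renamed identifiers, here is a compact reference sketch of the same idea with readable names; it is a sketch for comparison, not part of the original file.

from __future__ import annotations

from math import gcd


def pollard_rho_reference(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    # Readable-name sketch of the algorithm above: Floyd cycle detection over f(x) = (x*x + step) % num.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
    if num > 2 and num % 2 == 0:
        return 2  # the pseudorandom map struggles with even inputs, so handle them directly
    for _ in range(attempts):
        tortoise = hare = seed
        while True:
            tortoise = (tortoise * tortoise + step) % num
            hare = (hare * hare + step) % num
            hare = (hare * hare + step) % num
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                continue  # no common factor yet, keep walking the cycle
            if divisor != num:
                return divisor  # nontrivial factor found
            break  # divisor == num is useless, restart with new parameters
        seed = hare  # reseed from the hare's position, as in the file above
        step += 1
    return None


assert pollard_rho_reference(91) in (7, 13)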
709
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging _a = logging.get_logger(__name__) _a = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class _UpperCAmelCase( lowerCamelCase ): lowercase__ = 'gpt_neo' lowercase__ = ['past_key_values'] lowercase__ = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'} def __init__( self , __a=5_02_57 , __a=20_48 , __a=20_48 , __a=24 , __a=[[["global", "local"], 12]] , __a=16 , __a=None , __a=2_56 , __a="gelu_new" , __a=0.0 , __a=0.0 , __a=0.0 , __a=0.1 , __a=1e-5 , __a=0.02 , __a=True , __a=5_02_56 , __a=5_02_56 , **__a , ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = vocab_size _UpperCamelCase = max_position_embeddings _UpperCamelCase = hidden_size _UpperCamelCase = num_layers _UpperCamelCase = num_heads _UpperCamelCase = intermediate_size _UpperCamelCase = window_size _UpperCamelCase = activation_function _UpperCamelCase = resid_dropout _UpperCamelCase = embed_dropout _UpperCamelCase = attention_dropout _UpperCamelCase = classifier_dropout _UpperCamelCase = layer_norm_epsilon _UpperCamelCase = initializer_range _UpperCamelCase = use_cache _UpperCamelCase = bos_token_id _UpperCamelCase = eos_token_id _UpperCamelCase = attention_types _UpperCamelCase = self.expand_attention_types_params(__a) if len(self.attention_layers) != self.num_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.attention_layers)` == `config.num_layers` ''' F'''but is `len(config.attention_layers) = {len(self.attention_layers)}`, ''' F'''`config.num_layers = {self.num_layers}`. ''' '''`config.attention_layers` is prepared using `config.attention_types`. 
''' '''Please verify the value of `config.attention_types` argument.''') super().__init__(bos_token_id=__a , eos_token_id=__a , **__a) @staticmethod def UpperCAmelCase ( __a) -> int: '''simple docstring''' _UpperCamelCase = [] for item in attention_types: for _ in range(item[1]): attentions.extend(item[0]) return attentions def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> str: """simple docstring""" import torch _UpperCamelCase = input.size() _UpperCamelCase = len(__snake_case ) _UpperCamelCase = shape[dimension] _UpperCamelCase = torch.arange(0, __snake_case, __snake_case ) _UpperCamelCase = torch.div(sizedim - size, __snake_case, rounding_mode='''floor''' ) + 1 _UpperCamelCase = torch.arange(__snake_case ) + low_indices[:min_length][:, None] _UpperCamelCase = [slice(__snake_case )] * rank _UpperCamelCase = indices _UpperCamelCase = input[s] _UpperCamelCase = list(range(0, rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> str: """simple docstring""" import torch _UpperCamelCase = torch.arange(1, __snake_case ) _UpperCamelCase = torch.remainder(__snake_case, __snake_case ) _UpperCamelCase = remainders == 0 _UpperCamelCase = candidates[divisor_indices] _UpperCamelCase = torch.max(__snake_case ) return largest_divisor, torch.div(__snake_case, __snake_case, rounding_mode='''floor''' ) class _UpperCAmelCase( lowerCamelCase ): @property def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' _UpperCamelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}}) if self.use_past: self.fill_with_past_key_values_(__a , direction='''inputs''') _UpperCamelCase = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _UpperCamelCase = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return self._config.num_heads def UpperCAmelCase ( self , __a , __a = -1 , __a = -1 , __a = False , __a = None , ) -> Mapping[str, Any]: '''simple docstring''' _UpperCamelCase = super(__a , self).generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a) # We need to order the input in the way they appears in the forward() _UpperCamelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''') else: import torch _UpperCamelCase , _UpperCamelCase = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _UpperCamelCase = seqlen + 2 _UpperCamelCase = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCamelCase = [ (torch.zeros(__a), torch.zeros(__a)) for _ in range(self.num_layers) ] _UpperCamelCase = common_inputs['''attention_mask'''] if self.use_past: _UpperCamelCase = ordered_inputs['''attention_mask'''].dtype _UpperCamelCase = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(__a , __a , dtype=__a)] , dim=1) return ordered_inputs @property def UpperCAmelCase ( self) -> int: '''simple docstring''' return 13
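The expand_attention_types_params helper above flattens the nested attention_types specification into one entry per layer. A small sketch of what the default [[["global", "local"], 12]] expands to (the function name below is illustrative, not the class's own):

def expand_attention_types(attention_types):
    # Mirrors the static helper above: repeat each pattern item[0] item[1] times.
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            attentions.extend(pattern)
    return attentions


layers = expand_attention_types([[["global", "local"], 12]])
assert len(layers) == 24  # one entry per layer, matching the default num_layers above
assert layers[:4] == ["global", "local", "global", "local"]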
78
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
710
"""simple docstring""" import sys from collections import defaultdict class _UpperCAmelCase: def __init__( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = [] def UpperCAmelCase ( self , __a) -> Optional[Any]: '''simple docstring''' return self.node_position[vertex] def UpperCAmelCase ( self , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = pos def UpperCAmelCase ( self , __a , __a , __a , __a) -> Tuple: '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: _UpperCamelCase = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: _UpperCamelCase = 2 * start + 1 else: _UpperCamelCase = 2 * start + 2 if heap[smallest_child] < heap[start]: _UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child] _UpperCamelCase , _UpperCamelCase = ( heap[start], positions[start], ) _UpperCamelCase , _UpperCamelCase = temp, tempa _UpperCamelCase = self.get_position(positions[smallest_child]) self.set_position( positions[smallest_child] , self.get_position(positions[start])) self.set_position(positions[start] , __a) self.top_to_bottom(__a , __a , __a , __a) def UpperCAmelCase ( self , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = position[index] while index != 0: _UpperCamelCase = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2) if val < heap[parent]: _UpperCamelCase = heap[parent] _UpperCamelCase = position[parent] self.set_position(position[parent] , __a) else: _UpperCamelCase = val _UpperCamelCase = temp self.set_position(__a , __a) break _UpperCamelCase = parent else: _UpperCamelCase = val _UpperCamelCase = temp self.set_position(__a , 0) def UpperCAmelCase ( self , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = len(__a) // 2 - 1 for i in range(__a , -1 , -1): self.top_to_bottom(__a , __a , len(__a) , __a) def UpperCAmelCase ( self , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = positions[0] _UpperCamelCase = sys.maxsize self.top_to_bottom(__a , 0 , len(__a) , __a) return temp def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = Heap() _UpperCamelCase = [0] * len(__snake_case ) _UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph _UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex _UpperCamelCase = [] for vertex in range(len(__snake_case ) ): distance_tv.append(sys.maxsize ) positions.append(__snake_case ) heap.node_position.append(__snake_case ) _UpperCamelCase = [] _UpperCamelCase = 1 _UpperCamelCase = sys.maxsize for neighbor, distance in adjacency_list[0]: _UpperCamelCase = 0 _UpperCamelCase = distance heap.heapify(__snake_case, __snake_case ) for _ in range(1, len(__snake_case ) ): _UpperCamelCase = heap.delete_minimum(__snake_case, __snake_case ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) _UpperCamelCase = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(__snake_case )] ): _UpperCamelCase = distance heap.bottom_to_top( __snake_case, heap.get_position(__snake_case ), __snake_case, __snake_case ) _UpperCamelCase = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > _a = int(input("""Enter number of edges: """).strip()) _a = 
defaultdict(list) for _ in range(edges_number): edge = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(lowerCamelCase__(adjacency_list))
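For comparison, the same minimum spanning tree construction can be sketched with the standard-library heapq instead of the hand-rolled heap above; the names and the tiny triangle graph below are illustrative only.

import heapq
from collections import defaultdict


def prim_mst_edges(adjacency_list, start=0):
    # Lazy-deletion Prim's: repeatedly pop the cheapest edge leaving the tree, skipping visited vertices.
    visited = {start}
    heap = [(weight, start, neighbor) for neighbor, weight in adjacency_list[start]]
    heapq.heapify(heap)
    tree_edges = []
    while heap and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for neighbor, next_weight in adjacency_list[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (next_weight, v, neighbor))
    return tree_edges


graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
    graph[u].append([v, w])
    graph[v].append([u, w])
assert sorted(prim_mst_edges(graph)) == [(0, 1), (1, 2)]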
78
0
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class _UpperCAmelCase: def __init__( self , __a , __a=2 , __a=True , __a=False , __a=10 , __a=3 , __a=32 * 4 , __a=32 * 6 , __a=4 , __a=32 , ) -> Dict: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = is_training _UpperCamelCase = use_auxiliary_loss _UpperCamelCase = num_queries _UpperCamelCase = num_channels _UpperCamelCase = min_size _UpperCamelCase = max_size _UpperCamelCase = num_labels _UpperCamelCase = mask_feature_size def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( __a) _UpperCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__a) _UpperCamelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__a) > 0.5 ).float() _UpperCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=__a) > 0.5).long() _UpperCamelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCAmelCase ( self) -> int: '''simple docstring''' return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs() _UpperCamelCase = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask} return config, inputs_dict def UpperCAmelCase ( self , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = output.encoder_hidden_states _UpperCamelCase = output.pixel_decoder_hidden_states _UpperCamelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(__a) , len(config.backbone_config.depths)) self.parent.assertTrue(len(__a) , len(config.backbone_config.depths)) self.parent.assertTrue(len(__a) , config.decoder_config.decoder_layers) def UpperCAmelCase ( self , __a , __a , __a , __a=False) -> Tuple: '''simple docstring''' with torch.no_grad(): _UpperCamelCase = MaskFormerModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(pixel_values=__a , pixel_mask=__a) _UpperCamelCase = model(__a , output_hidden_states=__a) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( 
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(__a , __a) def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = MaskFormerForInstanceSegmentation(config=__a) model.to(__a) model.eval() def comm_check_on_output(__a): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _UpperCamelCase = model(pixel_values=__a , pixel_mask=__a) _UpperCamelCase = model(__a) comm_check_on_output(__a) _UpperCamelCase = model( pixel_values=__a , pixel_mask=__a , mask_labels=__a , class_labels=__a) comm_check_on_output(__a) self.parent.assertTrue(result.loss is not None) self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowercase__ = ( {'feature-extraction': MaskFormerModel, 'image-segmentation': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = MaskFormerModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a) def UpperCAmelCase ( self) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__a , **__a , output_hidden_states=__a) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__a) @unittest.skip(reason='''MaskFormer does not use inputs_embeds''') def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''') def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='''MaskFormer is not a generative model''') def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='''MaskFormer does not use token embeddings''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip( reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''') def UpperCAmelCase ( self) -> Optional[int]: 
'''simple docstring''' pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) _UpperCamelCase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __a) @slow def UpperCAmelCase ( self) -> Dict: '''simple docstring''' for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCamelCase = MaskFormerModel.from_pretrained(__a) self.assertIsNotNone(__a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = (self.model_tester.min_size,) * 2 _UpperCamelCase = { '''pixel_values''': torch.randn((2, 3, *size) , device=__a), '''mask_labels''': torch.randn((2, 10, *size) , device=__a), '''class_labels''': torch.zeros(2 , 10 , device=__a).long(), } _UpperCamelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(__a) _UpperCamelCase = model(**__a) self.assertTrue(outputs.loss is not None) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(__a , **__a , output_hidden_states=__a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a).to(__a) _UpperCamelCase = model(**__a , output_attentions=__a) self.assertTrue(outputs.attentions is not None) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCamelCase = self.all_model_classes[1] _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs() _UpperCamelCase = model_class(__a) model.to(__a) model.train() _UpperCamelCase = model(__a , mask_labels=__a , class_labels=__a).loss loss.backward() def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.all_model_classes[1] _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs() _UpperCamelCase = True _UpperCamelCase = True _UpperCamelCase = model_class(__a) model.to(__a) model.train() _UpperCamelCase = model(__a , mask_labels=__a , class_labels=__a) _UpperCamelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCamelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCamelCase = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _UpperCamelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=__a) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) 
self.assertIsNotNone(attentions.grad) _a = 1E-4 def lowerCamelCase__ ( ) -> Optional[int]: """simple docstring""" _UpperCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @slow class _UpperCAmelCase( unittest.TestCase ): @cached_property def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return ( MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''') if is_vision_available() else None ) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''').to(__a) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(__a , return_tensors='''pt''').to(__a) _UpperCamelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(__a , (1, 3, 8_00, 10_88)) with torch.no_grad(): _UpperCamelCase = model(**__a) _UpperCamelCase = torch.tensor( [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(__a) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a)) _UpperCamelCase = torch.tensor( [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(__a) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __a , atol=__a)) _UpperCamelCase = torch.tensor( [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(__a) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __a , atol=__a)) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''') .to(__a) .eval() ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(__a , return_tensors='''pt''').to(__a) _UpperCamelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(__a , (1, 3, 8_00, 10_88)) with torch.no_grad(): _UpperCamelCase = model(**__a) # masks_queries_logits _UpperCamelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCamelCase = [ [-1.373_7124, -1.772_4937, -1.936_4233], [-1.597_7281, -1.986_7939, -2.152_3695], [-1.579_5398, -1.926_9832, -2.09_3942], ] _UpperCamelCase = torch.tensor(__a).to(__a) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __a , atol=__a)) # class_queries_logits _UpperCamelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)) _UpperCamelCase = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ]).to(__a) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __a , atol=__a)) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''') .to(__a) .eval() ) _UpperCamelCase = 
self.default_image_processor _UpperCamelCase = prepare_img() _UpperCamelCase = image_processor(__a , return_tensors='''pt''').to(__a) _UpperCamelCase = inputs['''pixel_values'''].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(__a , (1, 3, 8_00, 10_88)) with torch.no_grad(): _UpperCamelCase = model(**__a) # masks_queries_logits _UpperCamelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCamelCase = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]] _UpperCamelCase = torch.tensor(__a).to(__a) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __a , atol=__a)) # class_queries_logits _UpperCamelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)) _UpperCamelCase = torch.tensor( [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(__a) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __a , atol=__a)) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCamelCase = ( MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''') .to(__a) .eval() ) _UpperCamelCase = self.default_image_processor _UpperCamelCase = image_processor( [np.zeros((3, 8_00, 13_33)), np.zeros((3, 8_00, 13_33))] , segmentation_maps=[np.zeros((3_84, 3_84)).astype(np.floataa), np.zeros((3_84, 3_84)).astype(np.floataa)] , return_tensors='''pt''' , ) _UpperCamelCase = inputs['''pixel_values'''].to(__a) _UpperCamelCase = [el.to(__a) for el in inputs['''mask_labels''']] _UpperCamelCase = [el.to(__a) for el in inputs['''class_labels''']] with torch.no_grad(): _UpperCamelCase = model(**__a) self.assertTrue(outputs.loss is not None)
711
"""simple docstring""" import json import sys def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" with open(__snake_case, encoding='''utf-8''' ) as f: _UpperCamelCase = json.load(__snake_case ) _UpperCamelCase = ['''<details>''', '''<summary>Show updated benchmarks!</summary>''', ''' '''] for benchmark_name in sorted(__snake_case ): _UpperCamelCase = results[benchmark_name] _UpperCamelCase = benchmark_name.split('''/''' )[-1] output_md.append(F'''### Benchmark: {benchmark_file_name}''' ) _UpperCamelCase = '''| metric |''' _UpperCamelCase = '''|--------|''' _UpperCamelCase = '''| new / old (diff) |''' for metric_name in sorted(__snake_case ): _UpperCamelCase = benchmark_res[metric_name] _UpperCamelCase = metric_vals['''new'''] _UpperCamelCase = metric_vals.get('''old''', __snake_case ) _UpperCamelCase = metric_vals.get('''diff''', __snake_case ) _UpperCamelCase = F''' {new_val:f}''' if isinstance(__snake_case, (int, float) ) else '''None''' if old_val is not None: val_str += F''' / {old_val:f}''' if isinstance(__snake_case, (int, float) ) else "None" if dif_val is not None: val_str += F''' ({dif_val:f})''' if isinstance(__snake_case, (int, float) ) else "None" title += " " + metric_name + " |" lines += "---|" value += val_str + " |" output_md += [title, lines, value, " "] output_md.append('''</details>''' ) with open(__snake_case, '''w''', encoding='''utf-8''' ) as f: f.writelines('''\n'''.join(__snake_case ) ) if __name__ == "__main__": _a = sys.argv[1] _a = sys.argv[2] format_json_to_md(input_json_file, output_md_file)
78
0
"""simple docstring""" import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" _UpperCamelCase = filter(lambda __snake_case : p.requires_grad, model.parameters() ) _UpperCamelCase = sum([np.prod(p.size() ) for p in model_parameters] ) return params _a = logging.getLogger(__name__) def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" if metric == "rouge2": _UpperCamelCase = '''{val_avg_rouge2:.4f}-{step_count}''' elif metric == "bleu": _UpperCamelCase = '''{val_avg_bleu:.4f}-{step_count}''' elif metric == "em": _UpperCamelCase = '''{val_avg_em:.4f}-{step_count}''' else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' ''' function.''' ) _UpperCamelCase = ModelCheckpoint( dirpath=__snake_case, filename=__snake_case, monitor=F'''val_{metric}''', mode='''max''', save_top_k=3, every_n_epochs=1, ) return checkpoint_callback def lowerCamelCase__ ( __snake_case, __snake_case ) -> List[str]: """simple docstring""" return EarlyStopping( monitor=F'''val_{metric}''', mode='''min''' if '''loss''' in metric else '''max''', patience=__snake_case, verbose=__snake_case, ) class _UpperCAmelCase( pl.Callback ): def UpperCAmelCase ( self , __a , __a) -> int: '''simple docstring''' _UpperCamelCase = {F'''lr_group_{i}''': param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(__a) @rank_zero_only def UpperCAmelCase ( self , __a , __a , __a , __a=True) -> None: '''simple docstring''' logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''') _UpperCamelCase = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']}) # Log results _UpperCamelCase = Path(pl_module.hparams.output_dir) if type_path == "test": _UpperCamelCase = od / '''test_results.txt''' _UpperCamelCase = od / '''test_generations.txt''' else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
_UpperCamelCase = od / F'''{type_path}_results/{trainer.global_step:05d}.txt''' _UpperCamelCase = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=__a) generations_file.parent.mkdir(exist_ok=__a) with open(__a , '''a+''') as writer: for key in sorted(__a): if key in ["log", "progress_bar", "preds"]: continue _UpperCamelCase = metrics[key] if isinstance(__a , torch.Tensor): _UpperCamelCase = val.item() _UpperCamelCase = F'''{key}: {val:.6f}\n''' writer.write(__a) if not save_generations: return if "preds" in metrics: _UpperCamelCase = '''\n'''.join(metrics['''preds''']) generations_file.open('''w+''').write(__a) @rank_zero_only def UpperCAmelCase ( self , __a , __a) -> List[str]: '''simple docstring''' try: _UpperCamelCase = pl_module.model.model.num_parameters() except AttributeError: _UpperCamelCase = pl_module.model.num_parameters() _UpperCamelCase = count_trainable_parameters(__a) # mp stands for million parameters trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1e6, '''grad_mp''': n_trainable_pars / 1e6}) @rank_zero_only def UpperCAmelCase ( self , __a , __a) -> int: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path) return self._write_logs(__a , __a , '''test''') @rank_zero_only def UpperCAmelCase ( self , __a , __a) -> Any: '''simple docstring''' save_json(pl_module.metrics , pl_module.metrics_save_path) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
712
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _a = logging.get_logger(__name__) def lowerCamelCase__ ( __snake_case, __snake_case=False ) -> Tuple: """simple docstring""" _UpperCamelCase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCamelCase = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=False ) -> str: """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _UpperCamelCase = '''''' else: _UpperCamelCase = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) _UpperCamelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict _UpperCamelCase = in_proj_weight[ : config.hidden_size, : ] _UpperCamelCase = in_proj_bias[: config.hidden_size] _UpperCamelCase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCamelCase = in_proj_bias[ 
config.hidden_size : config.hidden_size * 2 ] _UpperCamelCase = in_proj_weight[ -config.hidden_size :, : ] _UpperCamelCase = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__snake_case, __snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = dct.pop(__snake_case ) _UpperCamelCase = val def lowerCamelCase__ ( ) -> Dict: """simple docstring""" _UpperCamelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _UpperCamelCase = Image.open(requests.get(__snake_case, stream=__snake_case ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( __snake_case, __snake_case ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = ViTConfig() _UpperCamelCase = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": _UpperCamelCase = True _UpperCamelCase = int(vit_name[-12:-10] ) _UpperCamelCase = int(vit_name[-9:-6] ) else: _UpperCamelCase = 10_00 _UpperCamelCase = '''huggingface/label-files''' _UpperCamelCase = '''imagenet-1k-id2label.json''' _UpperCamelCase = json.load(open(hf_hub_download(__snake_case, __snake_case, repo_type='''dataset''' ), '''r''' ) ) _UpperCamelCase = {int(__snake_case ): v for k, v in idalabel.items()} _UpperCamelCase = idalabel _UpperCamelCase = {v: k for k, v in idalabel.items()} _UpperCamelCase = int(vit_name[-6:-4] ) _UpperCamelCase = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): _UpperCamelCase = 1_92 _UpperCamelCase = 7_68 _UpperCamelCase = 12 _UpperCamelCase = 3 elif vit_name[9:].startswith('''small''' ): _UpperCamelCase = 3_84 _UpperCamelCase = 15_36 _UpperCamelCase = 12 _UpperCamelCase = 6 else: pass else: if vit_name[4:].startswith('''small''' ): _UpperCamelCase = 7_68 _UpperCamelCase = 23_04 _UpperCamelCase = 8 _UpperCamelCase = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): _UpperCamelCase = 10_24 _UpperCamelCase = 40_96 _UpperCamelCase = 24 _UpperCamelCase = 16 elif vit_name[4:].startswith('''huge''' ): _UpperCamelCase = 12_80 _UpperCamelCase = 51_20 _UpperCamelCase = 32 _UpperCamelCase = 16 # load original model from timm _UpperCamelCase = timm.create_model(__snake_case, pretrained=__snake_case ) timm_model.eval() # load state_dict of original model, remove and rename some keys _UpperCamelCase = timm_model.state_dict() if base_model: remove_classification_head_(__snake_case ) _UpperCamelCase = create_rename_keys(__snake_case, __snake_case ) for src, dest in rename_keys: rename_key(__snake_case, __snake_case, __snake_case ) read_in_q_k_v(__snake_case, __snake_case, __snake_case ) # load HuggingFace model if vit_name[-5:] == "in21k": _UpperCamelCase = ViTModel(__snake_case ).eval() else: _UpperCamelCase = ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: _UpperCamelCase = DeiTImageProcessor(size=config.image_size ) else: _UpperCamelCase = ViTImageProcessor(size=config.image_size ) _UpperCamelCase = image_processor(images=prepare_img(), return_tensors='''pt''' ) _UpperCamelCase = encoding['''pixel_values'''] _UpperCamelCase = model(__snake_case ) if base_model: _UpperCamelCase = 
timm_model.forward_features(__snake_case ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__snake_case, outputs.pooler_output, atol=1e-3 ) else: _UpperCamelCase = timm_model(__snake_case ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__snake_case, outputs.logits, atol=1e-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_patch16_224""", type=str, help="""Name of the ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) _a = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
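The read_in_q_k_v step above splits timm's fused qkv projection into separate query, key and value tensors by row slicing. A small sketch of that slicing on a toy weight (the hidden size here is illustrative; ViT-Base uses 768):

import torch

hidden_size = 8  # illustrative toy size
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

# Same row slicing as above: rows [0, H) are query, [H, 2H) key, [2H, 3H) value.
query_w = in_proj_weight[:hidden_size, :]
key_w = in_proj_weight[hidden_size : hidden_size * 2, :]
value_w = in_proj_weight[-hidden_size:, :]
query_b = in_proj_bias[:hidden_size]
key_b = in_proj_bias[hidden_size : hidden_size * 2]
value_b = in_proj_bias[-hidden_size:]

assert query_w.shape == key_w.shape == value_w.shape == (hidden_size, hidden_size)
assert torch.equal(torch.cat([query_w, key_w, value_w], dim=0), in_proj_weight)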
78
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) _a = { """configuration_encodec""": [ """ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""", """EncodecConfig""", ], """feature_extraction_encodec""": ["""EncodecFeatureExtractor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""", """EncodecModel""", """EncodecPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
713
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=16 , __a=36 , __a=6 , __a=6 , __a=6 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = seq_length _UpperCamelCase = is_training _UpperCamelCase = use_input_mask _UpperCamelCase = use_token_type_ids _UpperCamelCase = use_labels _UpperCamelCase = vocab_size _UpperCamelCase = embedding_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_hidden_groups _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = num_labels _UpperCamelCase = num_choices _UpperCamelCase = scope def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase = None if self.use_input_mask: _UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase = None if self.use_token_type_ids: _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = 
model(__a , attention_mask=__a , token_type_ids=__a) _UpperCamelCase = model(__a , token_type_ids=__a) _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = AlbertForPreTraining(config=__a) model.to(__a) model.eval() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , sentence_order_label=__a , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertForMaskedLM(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = AlbertForQuestionAnswering(config=__a) model.to(__a) model.eval() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = AlbertForSequenceClassification(__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = self.num_labels _UpperCamelCase = AlbertForTokenClassification(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.num_choices _UpperCamelCase = AlbertForMultipleChoice(config=__a) model.to(__a) model.eval() _UpperCamelCase = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase = model( __a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': 
input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) lowercase__ = ( { 'feature-extraction': AlbertModel, 'fill-mask': AlbertForMaskedLM, 'question-answering': AlbertForQuestionAnswering, 'text-classification': AlbertForSequenceClassification, 'token-classification': AlbertForTokenClassification, 'zero-shot': AlbertForSequenceClassification, } if is_torch_available() else {} ) lowercase__ = True def UpperCAmelCase ( self , __a , __a , __a=False) -> List[str]: '''simple docstring''' _UpperCamelCase = super()._prepare_for_class(__a , __a , return_labels=__a) if return_labels: if model_class in get_values(__a): _UpperCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__a) _UpperCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a) return inputs_dict def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = AlbertModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__a) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a) def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__a) def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a) def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a) def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _UpperCamelCase = type self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = AlbertModel.from_pretrained(__a) self.assertIsNotNone(__a) @require_torch class _UpperCAmelCase( unittest.TestCase ): @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = AlbertModel.from_pretrained('''albert-base-v2''') _UpperCamelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]]) _UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): _UpperCamelCase = model(__a , attention_mask=__a)[0] _UpperCamelCase = torch.Size((1, 11, 7_68)) self.assertEqual(output.shape , __a) 
_UpperCamelCase = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4))
78
0
"""simple docstring""" from typing import List, Optional from tokenizers import ByteLevelBPETokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot_small import BlenderbotSmallTokenizer _a = logging.get_logger(__name__) _a = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } _a = { """vocab_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json""" }, """merges_file""": { """facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt""" }, """tokenizer_config_file""": { """facebook/blenderbot_small-90M""": ( """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json""" ) }, } _a = { """facebook/blenderbot_small-90M""": 512, } class _UpperCAmelCase( lowerCamelCase ): lowercase__ = VOCAB_FILES_NAMES lowercase__ = PRETRAINED_VOCAB_FILES_MAP lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ = BlenderbotSmallTokenizer def __init__( self , __a=None , __a=None , __a="<|endoftext|>" , __a="<|endoftext|>" , __a="<|endoftext|>" , __a=False , __a=True , **__a , ) -> int: '''simple docstring''' super().__init__( ByteLevelBPETokenizer( vocab=__a , merges=__a , add_prefix_space=__a , trim_offsets=__a , ) , bos_token=__a , eos_token=__a , unk_token=__a , **__a , ) _UpperCamelCase = add_prefix_space def UpperCAmelCase ( self , __a , __a=None) -> Tuple: '''simple docstring''' _UpperCamelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self , __a , __a = None) -> List[int]: '''simple docstring''' _UpperCamelCase = [self.sep_token_id] _UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
714
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowerCamelCase__ ( __snake_case ) -> Optional[int]: """simple docstring""" _UpperCamelCase = np.inf def set_batch_size(__snake_case ) -> None: nonlocal batch_size if isinstance(__snake_case, __snake_case ): _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(__snake_case, __snake_case ): _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(__snake_case, __snake_case ) and feature.dtype == "binary": _UpperCamelCase = min(__snake_case, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(__snake_case, __snake_case ) return None if batch_size is np.inf else batch_size class _UpperCAmelCase( lowerCamelCase ): def __init__( self , __a , __a = None , __a = None , __a = None , __a = False , __a = False , __a = None , **__a , ) -> Dict: '''simple docstring''' super().__init__( __a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , ) _UpperCamelCase = path_or_paths if isinstance(__a , __a) else {self.split: path_or_paths} _UpperCamelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCamelCase = Parquet( cache_dir=__a , data_files=__a , features=__a , hash=__a , **__a , ) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' # Build iterable dataset if self.streaming: _UpperCamelCase = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None _UpperCamelCase = None self.builder.download_and_prepare( download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , ) _UpperCamelCase = self.builder.as_dataset( split=self.split , verification_mode=__a , in_memory=self.keep_in_memory) return dataset class _UpperCAmelCase: def __init__( self , __a , __a , __a = None , **__a , ) -> Dict: '''simple docstring''' _UpperCamelCase = dataset _UpperCamelCase = path_or_buf _UpperCamelCase = batch_size or get_writer_batch_size(dataset.features) _UpperCamelCase = parquet_writer_kwargs def UpperCAmelCase ( self) -> int: '''simple docstring''' _UpperCamelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike)): with open(self.path_or_buf , '''wb+''') as buffer: _UpperCamelCase = self._write(file_obj=__a , batch_size=__a , **self.parquet_writer_kwargs) else: _UpperCamelCase = self._write(file_obj=self.path_or_buf , batch_size=__a , **self.parquet_writer_kwargs) return written def UpperCAmelCase ( self , __a , __a , **__a) -> int: '''simple docstring''' _UpperCamelCase = 0 _UpperCamelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __a) _UpperCamelCase = self.dataset.features.arrow_schema _UpperCamelCase = pq.ParquetWriter(__a , schema=__a , **__a) for offset in logging.tqdm( range(0 , len(self.dataset) , __a) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating 
parquet from Arrow format''' , ): _UpperCamelCase = query_table( table=self.dataset._data , key=slice(__a , offset + batch_size) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__a) written += batch.nbytes writer.close() return written
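# Minimal usage sketch (assumes the `datasets` and `pyarrow` packages are installed):
# the reader/writer pair above backs the public parquet round-trip of the `datasets`
# library; the public Dataset API is used here because the class names above are renamed.
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c"], "label": [0, 1, 0]})
ds.to_parquet("tiny.parquet")                    # writer path: writes the Arrow table in batches
reloaded = Dataset.from_parquet("tiny.parquet")  # reader path: builds a map-style dataset
assert reloaded.num_rows == 3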
78
0
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def lowerCamelCase__ ( __snake_case ) -> Dict: """simple docstring""" _UpperCamelCase = checkpoints.load_tax_checkpoint(__snake_case ) _UpperCamelCase = flatten_dict(__snake_case ) return flax_params def lowerCamelCase__ ( __snake_case ) -> Optional[Any]: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } _UpperCamelCase = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key _UpperCamelCase = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): _UpperCamelCase = new_key.replace(__snake_case, __snake_case ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): _UpperCamelCase = new_key.replace(__snake_case, __snake_case ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number _UpperCamelCase = re.sub(r'''layers_(\d+)''', r'''layer.\1''', __snake_case ) _UpperCamelCase = new_key.replace('''encoder''', '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number _UpperCamelCase = re.sub(r'''layers_(\d+)''', r'''layer.\1''', __snake_case ) _UpperCamelCase = flax_dict[key] _UpperCamelCase = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): _UpperCamelCase = torch.from_numpy(converted_dict[key].T ) else: _UpperCamelCase = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case=False, __snake_case=False ) -> Any: """simple docstring""" _UpperCamelCase = get_flax_param(__snake_case ) if not use_large: _UpperCamelCase = PixaStructVisionConfig() _UpperCamelCase = PixaStructTextConfig() else: _UpperCamelCase = PixaStructVisionConfig( hidden_size=15_36, d_ff=39_68, num_attention_heads=24, num_hidden_layers=18 ) _UpperCamelCase = PixaStructTextConfig(hidden_size=15_36, d_ff=39_68, num_heads=24, num_layers=18 ) _UpperCamelCase = PixaStructConfig( vision_config=encoder_config.to_dict(), 
text_config=decoder_config.to_dict(), is_vqa=__snake_case ) _UpperCamelCase = PixaStructForConditionalGeneration(__snake_case ) _UpperCamelCase = rename_and_convert_flax_params(__snake_case ) model.load_state_dict(__snake_case ) _UpperCamelCase = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) _UpperCamelCase = PixaStructImageProcessor() _UpperCamelCase = PixaStructProcessor(image_processor=__snake_case, tokenizer=__snake_case ) if use_large: _UpperCamelCase = 40_96 _UpperCamelCase = True # mkdir if needed os.makedirs(__snake_case, exist_ok=__snake_case ) model.save_pretrained(__snake_case ) processor.save_pretrained(__snake_case ) print('''Model saved in {}'''.format(__snake_case ) ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""") parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""") _a = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
715
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class _UpperCAmelCase( unittest.TestCase ): def __init__( self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , __a=None , __a=True , ) -> int: '''simple docstring''' _UpperCamelCase = size if size is not None else {'''shortest_edge''': 20} _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = num_channels _UpperCamelCase = image_size _UpperCamelCase = min_resolution _UpperCamelCase = max_resolution _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_flip_channel_order def UpperCAmelCase ( self) -> str: '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class _UpperCAmelCase( lowerCamelCase , unittest.TestCase ): lowercase__ = MobileViTImageProcessor if is_vision_available() else None def UpperCAmelCase ( self) -> List[Any]: '''simple docstring''' _UpperCamelCase = MobileViTImageProcessingTester(self) @property def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__a , '''do_resize''')) self.assertTrue(hasattr(__a , '''size''')) self.assertTrue(hasattr(__a , '''do_center_crop''')) self.assertTrue(hasattr(__a , '''center_crop''')) self.assertTrue(hasattr(__a , '''do_flip_channel_order''')) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''shortest_edge''': 20}) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18}) _UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {'''shortest_edge''': 42}) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84}) def UpperCAmelCase ( self) -> Dict: '''simple docstring''' pass def UpperCAmelCase ( self) -> str: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PIL images _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a) for image in image_inputs: self.assertIsInstance(__a , Image.Image) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , 
return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase ( self) -> Tuple: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a) for image in image_inputs: self.assertIsInstance(__a , np.ndarray) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase ( self) -> int: '''simple docstring''' # Initialize image_processing _UpperCamelCase = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a) for image in image_inputs: self.assertIsInstance(__a , torch.Tensor) # Test not batched input _UpperCamelCase = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCamelCase = image_processing(__a , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
78
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor _a = logging.get_logger(__name__) class _UpperCAmelCase( lowerCamelCase ): def __init__( self , *__a , **__a) -> None: '''simple docstring''' warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , __a , ) super().__init__(*__a , **__a)
716
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ['image_processor', 'tokenizer'] lowercase__ = 'OwlViTImageProcessor' lowercase__ = ('CLIPTokenizer', 'CLIPTokenizerFast') def __init__( self , __a=None , __a=None , **__a) -> List[Any]: '''simple docstring''' _UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __a , ) _UpperCamelCase = kwargs.pop('''feature_extractor''') _UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''') if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''') super().__init__(__a , __a) def __call__( self , __a=None , __a=None , __a=None , __a="max_length" , __a="np" , **__a) -> List[str]: '''simple docstring''' if text is None and query_images is None and images is None: raise ValueError( '''You have to specify at least one text or query image or image. All three cannot be none.''') if text is not None: if isinstance(__a , __a) or (isinstance(__a , __a) and not isinstance(text[0] , __a)): _UpperCamelCase = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a)] elif isinstance(__a , __a) and isinstance(text[0] , __a): _UpperCamelCase = [] # Maximum number of queries across batch _UpperCamelCase = max([len(__a) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(__a) != max_num_queries: _UpperCamelCase = t + [''' '''] * (max_num_queries - len(__a)) _UpperCamelCase = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a) encodings.append(__a) else: raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''') if return_tensors == "np": _UpperCamelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp _UpperCamelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0) elif return_tensors == "pt" and is_torch_available(): import torch _UpperCamelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0) _UpperCamelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf _UpperCamelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0) _UpperCamelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0) else: raise ValueError('''Target return tensor type could not be returned''') _UpperCamelCase = BatchEncoding() _UpperCamelCase = input_ids _UpperCamelCase = attention_mask if query_images is not None: _UpperCamelCase = BatchEncoding() _UpperCamelCase = self.image_processor( __a , return_tensors=__a , **__a).pixel_values _UpperCamelCase = query_pixel_values if images is not None: _UpperCamelCase = self.image_processor(__a , 
return_tensors=__a , **__a) if text is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif query_images is not None and images is not None: _UpperCamelCase = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**__a) , tensor_type=__a) def UpperCAmelCase ( self , *__a , **__a) -> str: '''simple docstring''' return self.image_processor.post_process(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Dict: '''simple docstring''' return self.image_processor.post_process_object_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[Any]: '''simple docstring''' return self.image_processor.post_process_image_guided_detection(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[int]: '''simple docstring''' return self.tokenizer.batch_decode(*__a , **__a) def UpperCAmelCase ( self , *__a , **__a) -> Optional[Any]: '''simple docstring''' return self.tokenizer.decode(*__a , **__a) @property def UpperCAmelCase ( self) -> str: '''simple docstring''' warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __a , ) return self.image_processor_class @property def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __a , ) return self.image_processor
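# Usage sketch for the processor defined above (its public name is OwlViTProcessor; the
# checkpoint id below is an assumption, and the snippet needs network access plus the
# `transformers`, `Pillow` and `requests` packages).
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# -> BatchEncoding with input_ids, attention_mask and pixel_values, assembled as in __call__ above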
78
0
"""simple docstring""" def lowerCamelCase__ ( __snake_case, __snake_case ): """simple docstring""" _UpperCamelCase = len(__snake_case ) _UpperCamelCase = len(__snake_case ) _UpperCamelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCamelCase = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCamelCase = True if a[i].islower(): _UpperCamelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
717
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _a = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["""PerceiverFeatureExtractor"""] _a = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
78
0
"""simple docstring""" import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( """The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion""" ) _a = None _a = { """7B""": 1_1008, """13B""": 1_3824, """30B""": 1_7920, """65B""": 2_2016, """70B""": 2_8672, } _a = { """7B""": 1, """7Bf""": 1, """13B""": 2, """13Bf""": 2, """30B""": 4, """65B""": 8, """70B""": 8, """70Bf""": 8, } def lowerCamelCase__ ( __snake_case, __snake_case=1, __snake_case=2_56 ) -> Union[str, Any]: """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def lowerCamelCase__ ( __snake_case ) -> List[str]: """simple docstring""" with open(__snake_case, '''r''' ) as f: return json.load(__snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> Optional[Any]: """simple docstring""" with open(__snake_case, '''w''' ) as f: json.dump(__snake_case, __snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case=True ) -> Optional[Any]: """simple docstring""" os.makedirs(__snake_case, exist_ok=__snake_case ) _UpperCamelCase = os.path.join(__snake_case, '''tmp''' ) os.makedirs(__snake_case, exist_ok=__snake_case ) _UpperCamelCase = read_json(os.path.join(__snake_case, '''params.json''' ) ) _UpperCamelCase = NUM_SHARDS[model_size] _UpperCamelCase = params['''n_layers'''] _UpperCamelCase = params['''n_heads'''] _UpperCamelCase = n_heads // num_shards _UpperCamelCase = params['''dim'''] _UpperCamelCase = dim // n_heads _UpperCamelCase = 10000.0 _UpperCamelCase = 1.0 / (base ** (torch.arange(0, __snake_case, 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _UpperCamelCase = params['''n_kv_heads'''] # for GQA / MQA _UpperCamelCase = n_heads_per_shard // num_key_value_heads _UpperCamelCase = dim // num_key_value_heads else: # compatibility with other checkpoints _UpperCamelCase = n_heads _UpperCamelCase = n_heads_per_shard _UpperCamelCase = dim # permute for sliced rotary def permute(__snake_case, __snake_case=n_heads, __snake_case=dim, __snake_case=dim ): return w.view(__snake_case, dima // n_heads // 2, 2, __snake_case ).transpose(1, 2 ).reshape(__snake_case, __snake_case ) print(F'''Fetching all parameters from the checkpoint at {input_base_path}.''' ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_UpperCamelCase = torch.load(os.path.join(__snake_case, '''consolidated.00.pth''' ), map_location='''cpu''' ) else: # Sharded _UpperCamelCase = [ torch.load(os.path.join(__snake_case, F'''consolidated.{i:02d}.pth''' ), map_location='''cpu''' ) for i in range(__snake_case ) ] _UpperCamelCase = 0 _UpperCamelCase = {'''weight_map''': {}} for layer_i in range(__snake_case ): _UpperCamelCase = F'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded _UpperCamelCase = { F'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute( loaded[F'''layers.{layer_i}.attention.wq.weight'''] ), F'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute( loaded[F'''layers.{layer_i}.attention.wk.weight'''] ), F'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[F'''layers.{layer_i}.attention.wv.weight'''], F'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[F'''layers.{layer_i}.attention.wo.weight'''], F'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w1.weight'''], F'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w2.weight'''], F'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[F'''layers.{layer_i}.feed_forward.w3.weight'''], F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[F'''layers.{layer_i}.attention_norm.weight'''], F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[F'''layers.{layer_i}.ffn_norm.weight'''], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
_UpperCamelCase = { F'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][ F'''layers.{layer_i}.attention_norm.weight''' ].clone(), F'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][ F'''layers.{layer_i}.ffn_norm.weight''' ].clone(), } _UpperCamelCase = permute( torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wq.weight'''].view(__snake_case, __snake_case, __snake_case ) for i in range(__snake_case ) ], dim=0, ).reshape(__snake_case, __snake_case ) ) _UpperCamelCase = permute( torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wk.weight'''].view( __snake_case, __snake_case, __snake_case ) for i in range(__snake_case ) ], dim=0, ).reshape(__snake_case, __snake_case ), __snake_case, __snake_case, __snake_case, ) _UpperCamelCase = torch.cat( [ loaded[i][F'''layers.{layer_i}.attention.wv.weight'''].view( __snake_case, __snake_case, __snake_case ) for i in range(__snake_case ) ], dim=0, ).reshape(__snake_case, __snake_case ) _UpperCamelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.attention.wo.weight'''] for i in range(__snake_case )], dim=1 ) _UpperCamelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(__snake_case )], dim=0 ) _UpperCamelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(__snake_case )], dim=1 ) _UpperCamelCase = torch.cat( [loaded[i][F'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(__snake_case )], dim=0 ) _UpperCamelCase = inv_freq for k, v in state_dict.items(): _UpperCamelCase = filename param_count += v.numel() torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) ) _UpperCamelCase = F'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin''' if model_size == "7B": # Unsharded _UpperCamelCase = { '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''], '''model.norm.weight''': loaded['''norm.weight'''], '''lm_head.weight''': loaded['''output.weight'''], } else: _UpperCamelCase = { '''model.norm.weight''': loaded[0]['''norm.weight'''], '''model.embed_tokens.weight''': torch.cat( [loaded[i]['''tok_embeddings.weight'''] for i in range(__snake_case )], dim=1 ), '''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(__snake_case )], dim=0 ), } for k, v in state_dict.items(): _UpperCamelCase = filename param_count += v.numel() torch.save(__snake_case, os.path.join(__snake_case, __snake_case ) ) # Write configs _UpperCamelCase = {'''total_size''': param_count * 2} write_json(__snake_case, os.path.join(__snake_case, '''pytorch_model.bin.index.json''' ) ) _UpperCamelCase = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1 _UpperCamelCase = params['''multiple_of'''] if '''multiple_of''' in params else 2_56 _UpperCamelCase = LlamaConfig( hidden_size=__snake_case, intermediate_size=compute_intermediate_size(__snake_case, __snake_case, __snake_case ), num_attention_heads=params['''n_heads'''], num_hidden_layers=params['''n_layers'''], rms_norm_eps=params['''norm_eps'''], num_key_value_heads=__snake_case, ) config.save_pretrained(__snake_case ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print('''Loading the checkpoint in a Llama model.''' ) _UpperCamelCase = LlamaForCausalLM.from_pretrained(__snake_case, torch_dtype=torch.floataa, low_cpu_mem_usage=__snake_case ) # Avoid saving this as part of the config. 
del model.config._name_or_path print('''Saving in the Transformers format.''' ) model.save_pretrained(__snake_case, safe_serialization=__snake_case ) shutil.rmtree(__snake_case ) def lowerCamelCase__ ( __snake_case, __snake_case ) -> Any: """simple docstring""" _UpperCamelCase = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(F'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' ) _UpperCamelCase = tokenizer_class(__snake_case ) tokenizer.save_pretrained(__snake_case ) def lowerCamelCase__ ( ) -> int: """simple docstring""" _UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--input_dir''', help='''Location of LLaMA weights, which contains tokenizer.model and model folders''', ) parser.add_argument( '''--model_size''', choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''], ) parser.add_argument( '''--output_dir''', help='''Location to write HF model and tokenizer''', ) parser.add_argument('''--safe_serialization''', type=__snake_case, help='''Whether or not to save using `safetensors`.''' ) _UpperCamelCase = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir, input_base_path=os.path.join(args.input_dir, args.model_size ), model_size=args.model_size, safe_serialization=args.safe_serialization, ) _UpperCamelCase = os.path.join(args.input_dir, '''tokenizer.model''' ) write_tokenizer(args.output_dir, __snake_case ) if __name__ == "__main__": main()
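# The first helper in this script rounds the FFN width: it takes floor(8 * dim / 3), scales it
# by ffn_dim_multiplier, and rounds the result up to the next multiple of `multiple_of`.
# Sketch with a descriptive name (assumed); the checks use hidden sizes 4096 and 5120 for the
# 7B and 13B models (an assumption, they are not stated in the file) and reproduce the
# 11008 / 13824 entries from the table at the top of the script.
def compute_intermediate_size(n: int, ffn_dim_multiplier: float = 1, multiple_of: int = 256) -> int:
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


assert compute_intermediate_size(4096) == 11008
assert compute_intermediate_size(5120) == 13824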
718
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class _UpperCAmelCase: def __init__( self , __a , __a=13 , __a=2 , __a=24 , __a=16 , __a=True , __a=True , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=10 , __a=0.02 , __a=None , __a=2 , __a=2 , ) -> List[str]: '''simple docstring''' _UpperCamelCase = parent _UpperCamelCase = batch_size _UpperCamelCase = patch_size _UpperCamelCase = max_length _UpperCamelCase = num_mel_bins _UpperCamelCase = is_training _UpperCamelCase = use_labels _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_act _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = type_sequence_label_size _UpperCamelCase = initializer_range _UpperCamelCase = scope _UpperCamelCase = frequency_stride _UpperCamelCase = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCamelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _UpperCamelCase = (self.max_length - self.patch_size) // self.time_stride + 1 _UpperCamelCase = frequency_out_dimension * time_out_dimension _UpperCamelCase = num_patches + 2 def UpperCAmelCase ( self) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) _UpperCamelCase = None if self.use_labels: _UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase = self.get_config() return config, input_values, labels def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def UpperCAmelCase ( self , __a , __a , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = ASTModel(config=__a) model.to(__a) model.eval() _UpperCamelCase = model(__a) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( 
_UpperCamelCase ) , ( _UpperCamelCase ) , ) = config_and_inputs _UpperCamelCase = {'''input_values''': input_values} return config, inputs_dict @require_torch class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ): lowercase__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) lowercase__ = ( {'audio-classification': ASTForAudioClassification, 'feature-extraction': ASTModel} if is_torch_available() else {} ) lowercase__ = False lowercase__ = False lowercase__ = False lowercase__ = False def UpperCAmelCase ( self , __a , __a , __a , __a , __a) -> Optional[Any]: '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def UpperCAmelCase ( self) -> Any: '''simple docstring''' _UpperCamelCase = ASTModelTester(self) _UpperCamelCase = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37) def UpperCAmelCase ( self) -> int: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''AST does not use inputs_embeds''') def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' pass def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _UpperCamelCase = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear)) def UpperCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase = model_class(__a) _UpperCamelCase = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase = [*signature.parameters.keys()] _UpperCamelCase = ['''input_values'''] self.assertListEqual(arg_names[:1] , __a) def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a) @slow def UpperCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase = ASTModel.from_pretrained(__a) self.assertIsNotNone(__a) def lowerCamelCase__ ( ) -> List[str]: """simple docstring""" _UpperCamelCase = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''', filename='''sample_audio.flac''', repo_type='''dataset''' ) _UpperCamelCase , _UpperCamelCase = torchaudio.load(__snake_case ) return audio, sampling_rate @require_torch @require_torchaudio class _UpperCAmelCase( unittest.TestCase ): @cached_property def UpperCAmelCase ( self) -> Dict: '''simple docstring''' return ( ASTFeatureExtractor.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''') if is_torchaudio_available() else None ) @slow def UpperCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCamelCase = self.default_feature_extractor _UpperCamelCase = ASTForAudioClassification.from_pretrained('''MIT/ast-finetuned-audioset-10-10-0.4593''').to(__a) _UpperCamelCase = self.default_feature_extractor _UpperCamelCase , _UpperCamelCase = prepare_audio() _UpperCamelCase = audio.squeeze().numpy() _UpperCamelCase = feature_extractor(__a , sampling_rate=__a , return_tensors='''pt''').to(__a) # forward 
pass with torch.no_grad(): _UpperCamelCase = model(**__a) # verify the logits _UpperCamelCase = torch.Size((1, 5_27)) self.assertEqual(outputs.logits.shape , __a) _UpperCamelCase = torch.tensor([-0.8760, -7.0042, -8.6602]).to(__a) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4))
78
0
"""simple docstring""" import copy import re class _UpperCAmelCase: lowercase__ = 'hp' lowercase__ = {} lowercase__ = None @classmethod def UpperCAmelCase ( cls , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = prefix _UpperCamelCase = defaults cls.build_naming_info() @staticmethod def UpperCAmelCase ( __a , __a) -> Union[str, Any]: '''simple docstring''' if len(__a) == 0: return "" _UpperCamelCase = None if any(char.isdigit() for char in word): raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a) + 1): _UpperCamelCase = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _UpperCamelCase = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a): _UpperCamelCase = '''''' while integer != 0: _UpperCamelCase = chr(ord('''A''') + integer % 10) + s integer //= 10 return s _UpperCamelCase = 0 while True: _UpperCamelCase = word + '''#''' + int_to_alphabetic(__a) if sword in info["reverse_short_word"]: continue else: _UpperCamelCase = sword break _UpperCamelCase = short_word _UpperCamelCase = word return short_word @staticmethod def UpperCAmelCase ( __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = param_name.split('''_''') _UpperCamelCase = [TrialShortNamer.shortname_for_word(__a , __a) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _UpperCamelCase = ['''''', '''_'''] for separator in separators: _UpperCamelCase = separator.join(__a) if shortname not in info["reverse_short_param"]: _UpperCamelCase = shortname _UpperCamelCase = param_name return shortname return param_name @staticmethod def UpperCAmelCase ( __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = TrialShortNamer.shortname_for_key(__a , __a) _UpperCamelCase = short_name _UpperCamelCase = param_name @classmethod def UpperCAmelCase ( cls) -> Any: '''simple docstring''' if cls.NAMING_INFO is not None: return _UpperCamelCase = { '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } _UpperCamelCase = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(__a , __a) _UpperCamelCase = info @classmethod def UpperCAmelCase ( cls , __a) -> Optional[Any]: '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None _UpperCamelCase = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'''You should provide a default value for the param name {k} with value {v}''') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _UpperCamelCase = cls.NAMING_INFO['''short_param'''][k] if isinstance(__a , __a): _UpperCamelCase = 1 if v else 0 _UpperCamelCase = '''''' if isinstance(__a , (int, float)) else '''-''' _UpperCamelCase = F'''{key}{sep}{v}''' name.append(__a) return "_".join(__a) @classmethod def UpperCAmelCase ( cls , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = repr[len(cls.PREFIX) + 1 :] if repr == "": _UpperCamelCase = [] else: _UpperCamelCase = repr.split('''_''') _UpperCamelCase = {} for value in values: if "-" in value: _UpperCamelCase , _UpperCamelCase = value.split('''-''') else: _UpperCamelCase = re.sub('''[0-9.]''' , '''''' , __a) _UpperCamelCase = float(re.sub('''[^0-9.]''' , '''''' , __a)) _UpperCamelCase = cls.NAMING_INFO['''reverse_short_param'''][p_k] 
_UpperCamelCase = p_v for k in cls.DEFAULTS: if k not in parameters: _UpperCamelCase = cls.DEFAULTS[k] return parameters
719
"""simple docstring""" def lowerCamelCase__ ( ) -> list[list[int]]: """simple docstring""" return [list(range(10_00 - i, -10_00 - i, -1 ) ) for i in range(10_00 )] _a = generate_large_matrix() _a = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def lowerCamelCase__ ( __snake_case ) -> None: """simple docstring""" assert all(row == sorted(__snake_case, reverse=__snake_case ) for row in grid ) assert all(list(__snake_case ) == sorted(__snake_case, reverse=__snake_case ) for col in zip(*__snake_case ) ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 _UpperCamelCase = len(__snake_case ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: _UpperCamelCase = (left + right) // 2 _UpperCamelCase = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: _UpperCamelCase = mid + 1 else: _UpperCamelCase = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__snake_case ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 _UpperCamelCase = len(grid[0] ) for i in range(len(__snake_case ) ): _UpperCamelCase = find_negative_index(grid[i][:bound] ) total += bound return (len(__snake_case ) * len(grid[0] )) - total def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" return len([number for row in grid for number in row if number < 0] ) def lowerCamelCase__ ( __snake_case ) -> int: """simple docstring""" _UpperCamelCase = 0 for row in grid: for i, number in enumerate(__snake_case ): if number < 0: total += len(__snake_case ) - i break return total def lowerCamelCase__ ( ) -> None: """simple docstring""" from timeit import timeit print('''Running benchmarks''' ) _UpperCamelCase = ( '''from __main__ import count_negatives_binary_search, ''' '''count_negatives_brute_force, count_negatives_brute_force_with_break, grid''' ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): _UpperCamelCase = timeit(F'''{func}(grid=grid)''', setup=__snake_case, number=5_00 ) print(F'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
78
0
"""simple docstring""" _a = { """A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""", """H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""", """O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""", """V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""", """2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""", """8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""", """:""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""", """?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""", """(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/""" } # Exclamation mark is not in ITU-R recommendation # fmt: on _a = {value: key for key, value in MORSE_CODE_DICT.items()} def lowerCamelCase__ ( __snake_case ) -> str: return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def lowerCamelCase__ ( __snake_case ) -> str: return "".join(REVERSE_DICT[char] for char in message.split() ) def lowerCamelCase__ ( ) -> None: _UpperCamelCase = '''Morse code here!''' print(__snake_case ) _UpperCamelCase = encrypt(__snake_case ) print(__snake_case ) _UpperCamelCase = decrypt(__snake_case ) print(__snake_case ) if __name__ == "__main__": main()
720
"""simple docstring""" import copy import re class _UpperCAmelCase: lowercase__ = 'hp' lowercase__ = {} lowercase__ = None @classmethod def UpperCAmelCase ( cls , __a , __a) -> Dict: '''simple docstring''' _UpperCamelCase = prefix _UpperCamelCase = defaults cls.build_naming_info() @staticmethod def UpperCAmelCase ( __a , __a) -> Union[str, Any]: '''simple docstring''' if len(__a) == 0: return "" _UpperCamelCase = None if any(char.isdigit() for char in word): raise Exception(F'''Parameters should not contain numbers: \'{word}\' contains a number''') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(__a) + 1): _UpperCamelCase = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: _UpperCamelCase = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(__a): _UpperCamelCase = '''''' while integer != 0: _UpperCamelCase = chr(ord('''A''') + integer % 10) + s integer //= 10 return s _UpperCamelCase = 0 while True: _UpperCamelCase = word + '''#''' + int_to_alphabetic(__a) if sword in info["reverse_short_word"]: continue else: _UpperCamelCase = sword break _UpperCamelCase = short_word _UpperCamelCase = word return short_word @staticmethod def UpperCAmelCase ( __a , __a) -> Any: '''simple docstring''' _UpperCamelCase = param_name.split('''_''') _UpperCamelCase = [TrialShortNamer.shortname_for_word(__a , __a) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name _UpperCamelCase = ['''''', '''_'''] for separator in separators: _UpperCamelCase = separator.join(__a) if shortname not in info["reverse_short_param"]: _UpperCamelCase = shortname _UpperCamelCase = param_name return shortname return param_name @staticmethod def UpperCAmelCase ( __a , __a) -> List[str]: '''simple docstring''' _UpperCamelCase = TrialShortNamer.shortname_for_key(__a , __a) _UpperCamelCase = short_name _UpperCamelCase = param_name @classmethod def UpperCAmelCase ( cls) -> Any: '''simple docstring''' if cls.NAMING_INFO is not None: return _UpperCamelCase = { '''short_word''': {}, '''reverse_short_word''': {}, '''short_param''': {}, '''reverse_short_param''': {}, } _UpperCamelCase = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(__a , __a) _UpperCamelCase = info @classmethod def UpperCAmelCase ( cls , __a) -> Optional[Any]: '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None _UpperCamelCase = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'''You should provide a default value for the param name {k} with value {v}''') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue _UpperCamelCase = cls.NAMING_INFO['''short_param'''][k] if isinstance(__a , __a): _UpperCamelCase = 1 if v else 0 _UpperCamelCase = '''''' if isinstance(__a , (int, float)) else '''-''' _UpperCamelCase = F'''{key}{sep}{v}''' name.append(__a) return "_".join(__a) @classmethod def UpperCAmelCase ( cls , __a) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase = repr[len(cls.PREFIX) + 1 :] if repr == "": _UpperCamelCase = [] else: _UpperCamelCase = repr.split('''_''') _UpperCamelCase = {} for value in values: if "-" in value: _UpperCamelCase , _UpperCamelCase = value.split('''-''') else: _UpperCamelCase = re.sub('''[0-9.]''' , '''''' , __a) _UpperCamelCase = float(re.sub('''[^0-9.]''' , '''''' , __a)) _UpperCamelCase = cls.NAMING_INFO['''reverse_short_param'''][p_k] 
_UpperCamelCase = p_v for k in cls.DEFAULTS: if k not in parameters: _UpperCamelCase = cls.DEFAULTS[k] return parameters
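# Hedged worked example (not part of the dataset row above), traced by hand from the code
# since the obfuscated method names collide and the class is not directly runnable.
# Assuming PREFIX = "hp" and DEFAULTS = {"per_device_batch_size": 8}:
#   - "per_device_batch_size" shortens to "pdbs" (first unused one-letter prefix per word, joined without a separator)
#   - shortname({"per_device_batch_size": 16}) -> "hp_pdbs16"  (params equal to their default are dropped)
#   - parse_repr("hp_pdbs16") -> {"per_device_batch_size": 16.0}  (numeric values are recovered as floats)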
78
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _a = logging.get_logger(__name__) class _UpperCAmelCase( lowerCamelCase ): lowercase__ = ['pixel_values'] def __init__( self , __a = True , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = None , __a = True , __a = 1 / 2_55 , __a = True , __a = None , __a = None , **__a , ) -> None: '''simple docstring''' super().__init__(**__a) _UpperCamelCase = size if size is not None else {'''shortest_edge''': 2_56} _UpperCamelCase = get_size_dict(__a , default_to_square=__a) _UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} _UpperCamelCase = get_size_dict(__a) _UpperCamelCase = do_resize _UpperCamelCase = size _UpperCamelCase = resample _UpperCamelCase = do_center_crop _UpperCamelCase = crop_size _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_normalize _UpperCamelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _UpperCamelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(__a , default_to_square=__a) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}''') _UpperCamelCase = get_resize_output_image_size(__a , size=size['''shortest_edge'''] , default_to_square=__a) return resize(__a , size=__a , resample=__a , data_format=__a , **__a) def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> np.ndarray: '''simple docstring''' _UpperCamelCase = get_size_dict(__a) return center_crop(__a , size=(size['''height'''], size['''width''']) , data_format=__a , **__a) def UpperCAmelCase ( self , __a , __a , __a = None , **__a) -> np.ndarray: '''simple docstring''' return rescale(__a , scale=__a , data_format=__a , **__a) def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> np.ndarray: '''simple docstring''' return normalize(__a , mean=__a , std=__a , data_format=__a , **__a) def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = do_resize if do_resize is not None else self.do_resize _UpperCamelCase = size if size is not None else self.size _UpperCamelCase = get_size_dict(__a , default_to_square=__a) _UpperCamelCase = resample if resample is not None else self.resample _UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCamelCase = crop_size if crop_size is not None else self.crop_size _UpperCamelCase = get_size_dict(__a) _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize _UpperCamelCase = image_mean if image_mean is not None else self.image_mean _UpperCamelCase = image_std if image_std is not None else self.image_std _UpperCamelCase = make_list_of_images(__a) if not valid_images(__a): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') # All transformations expect numpy arrays. _UpperCamelCase = [to_numpy_array(__a) for image in images] if do_resize: _UpperCamelCase = [self.resize(image=__a , size=__a , resample=__a) for image in images] if do_center_crop: _UpperCamelCase = [self.center_crop(image=__a , size=__a) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=__a , scale=__a) for image in images] if do_normalize: _UpperCamelCase = [self.normalize(image=__a , mean=__a , std=__a) for image in images] _UpperCamelCase = [to_channel_dimension_format(__a , __a) for image in images] _UpperCamelCase = {'''pixel_values''': images} return BatchFeature(data=__a , tensor_type=__a)
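# Hedged sketch (not part of the dataset row above) of the numeric pipeline the processor applies
# per image -- rescale, normalize with the IMAGENET_STANDARD statistics, then channels-first layout
# (resize and center-crop omitted) -- written against plain numpy because the obfuscated method names collide:
import numpy as np

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
pixel_values = image.astype(np.float32) * (1 / 255)                # rescale_factor = 1 / 255
mean = np.array([0.5, 0.5, 0.5])                                   # IMAGENET_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5])                                    # IMAGENET_STANDARD_STD
pixel_values = (pixel_values - mean) / std                         # normalize
pixel_values = pixel_values.transpose(2, 0, 1)                     # ChannelDimension.FIRST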
721
"""simple docstring""" import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) ) _UpperCamelCase = FileLock(str(tmpdir / '''foo.lock''' ) ) _UpperCamelCase = 0.01 with locka.acquire(): with pytest.raises(__snake_case ): _UpperCamelCase = time.time() locka.acquire(__snake_case ) assert time.time() - _start > timeout def lowerCamelCase__ ( __snake_case ) -> List[Any]: """simple docstring""" _UpperCamelCase = '''a''' * 10_00 + '''.lock''' _UpperCamelCase = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith('''.lock''' ) assert not locka._lock_file.endswith(__snake_case ) assert len(os.path.basename(locka._lock_file ) ) <= 2_55 _UpperCamelCase = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(__snake_case ): locka.acquire(0 )
78
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = { """configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""], """tokenization_perceiver""": ["""PerceiverTokenizer"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""PerceiverFeatureExtractor"""] __UpperCAmelCase = ["""PerceiverImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PerceiverForImageClassificationConvProcessing""", """PerceiverForImageClassificationFourier""", """PerceiverForImageClassificationLearned""", """PerceiverForMaskedLM""", """PerceiverForMultimodalAutoencoding""", """PerceiverForOpticalFlow""", """PerceiverForSequenceClassification""", """PerceiverLayer""", """PerceiverModel""", """PerceiverPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
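# Hedged usage note (not part of the dataset row above), assuming this is the package __init__
# for transformers' Perceiver model: the _LazyModule defers the heavy imports, so only the
# attribute you actually touch pulls in its backend, e.g.
#   from transformers import PerceiverConfig, PerceiverTokenizer   # no torch / vision import yet
#   from transformers import PerceiverModel                        # first access imports the torch modeling file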
79
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {} def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: SCREAMING_SNAKE_CASE : str = [[w, v]] if not self.graph.get(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = [] def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : str ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Tuple = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Any = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : str = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : Union[str, Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = deque() SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : int = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : Dict = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : List[str] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[Any] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : int = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return sorted_nodes def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : int = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = s SCREAMING_SNAKE_CASE : List[Any] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = -2 SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Tuple = s SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : str = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Dict = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[str] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = s 
SCREAMING_SNAKE_CASE : Optional[int] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str=-2 , lowerCamelCase_ : int=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = {} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, v]] # add the other way if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, u]] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) # the other way round if self.graph.get(lowerCamelCase_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str=-2 , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] if s == -2: SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : Any = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : List[str] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = deque() SCREAMING_SNAKE_CASE : Tuple = [] if s == -2: SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : List[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if 
visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = -2 SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : str = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Optional[int] = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Union[str, Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s SCREAMING_SNAKE_CASE : str = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = s SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Tuple = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Any = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Any = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : str = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Optional[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Tuple = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = time() return end - 
begin def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Dict=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = time() return end - begin
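# Hedged illustration (not part of the dataset rows above) of the adjacency structure both graph
# classes maintain: self.graph maps a vertex to a list of [weight, neighbour] pairs.
# For the directed variant, after add_pair(0, 1) and add_pair(0, 2) with the default weight 1:
#   {0: [[1, 1], [1, 2]], 1: [], 2: []}
# The undirected variant additionally appends the mirrored [weight, source] entry under each neighbour.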
79
1
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''informer''' SCREAMING_SNAKE_CASE__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self : Tuple , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : str = "student_t" , lowerCamelCase_ : str = "nll" , lowerCamelCase_ : int = 1 , lowerCamelCase_ : List[int] = None , lowerCamelCase_ : Optional[Union[str, bool]] = "mean" , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : int = 0 , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : int = 64 , lowerCamelCase_ : int = 32 , lowerCamelCase_ : int = 32 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : int = 2 , lowerCamelCase_ : bool = True , lowerCamelCase_ : str = "gelu" , lowerCamelCase_ : float = 0.05 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : float = 0.1 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 0.02 , lowerCamelCase_ : Any=True , lowerCamelCase_ : str = "prob" , lowerCamelCase_ : int = 5 , lowerCamelCase_ : bool = True , **lowerCamelCase_ : List[Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = prediction_length SCREAMING_SNAKE_CASE : Tuple = context_length or prediction_length SCREAMING_SNAKE_CASE : Dict = distribution_output SCREAMING_SNAKE_CASE : Optional[Any] = loss SCREAMING_SNAKE_CASE : Optional[Any] = input_size SCREAMING_SNAKE_CASE : Dict = num_time_features SCREAMING_SNAKE_CASE : Dict = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] SCREAMING_SNAKE_CASE : Optional[int] = scaling SCREAMING_SNAKE_CASE : List[Any] = num_dynamic_real_features SCREAMING_SNAKE_CASE : str = num_static_real_features SCREAMING_SNAKE_CASE : Union[str, Any] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(lowerCamelCase_ ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) SCREAMING_SNAKE_CASE : Optional[Any] = cardinality else: SCREAMING_SNAKE_CASE : Any = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(lowerCamelCase_ ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) SCREAMING_SNAKE_CASE : List[str] = embedding_dimension else: SCREAMING_SNAKE_CASE : Union[str, Any] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] SCREAMING_SNAKE_CASE : Optional[int] = num_parallel_samples # Transformer architecture configuration SCREAMING_SNAKE_CASE : List[str] = input_size * len(self.lags_sequence ) + self._number_of_features SCREAMING_SNAKE_CASE : str = d_model 
SCREAMING_SNAKE_CASE : Union[str, Any] = encoder_attention_heads SCREAMING_SNAKE_CASE : List[Any] = decoder_attention_heads SCREAMING_SNAKE_CASE : int = encoder_ffn_dim SCREAMING_SNAKE_CASE : List[Any] = decoder_ffn_dim SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layers SCREAMING_SNAKE_CASE : str = decoder_layers SCREAMING_SNAKE_CASE : List[Any] = dropout SCREAMING_SNAKE_CASE : Tuple = attention_dropout SCREAMING_SNAKE_CASE : Any = activation_dropout SCREAMING_SNAKE_CASE : Dict = encoder_layerdrop SCREAMING_SNAKE_CASE : Tuple = decoder_layerdrop SCREAMING_SNAKE_CASE : Tuple = activation_function SCREAMING_SNAKE_CASE : Any = init_std SCREAMING_SNAKE_CASE : Dict = use_cache # Informer SCREAMING_SNAKE_CASE : List[Any] = attention_type SCREAMING_SNAKE_CASE : Tuple = sampling_factor SCREAMING_SNAKE_CASE : Any = distil super().__init__(is_encoder_decoder=lowerCamelCase_ , **lowerCamelCase_ ) @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
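# Hedged worked example (not part of the dataset row above) of the `_number_of_features` property
# defined at the end of the config: with embedding_dimension=[5], num_dynamic_real_features=0,
# num_time_features=2, num_static_real_features=0 and input_size=1,
#   5 + 0 + 2 + 0 + 1 * 2 = 9
# where the final `input_size * 2` accounts for the log1p(abs(loc)) and log(scale) features.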
79
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} __UpperCAmelCase = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __UpperCAmelCase = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : List[Any]="<pad>" , lowerCamelCase_ : Optional[Any]="<mask>" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : str , 
lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(lowerCamelCase_ ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = """""" SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_ ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def __getstate__( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
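# Hedged illustration (not part of the dataset row above) of the special-token layout built by the
# first token-building method above (build_inputs_with_special_tokens in the unobfuscated tokenizer):
#   single sequence: <s> A </s>
#   sequence pair:   <s> A </s> </s> B </s>
# with the fairseq-style ids <s>=0, <pad>=1, </s>=2, <unk>=3 mapped ahead of the sentencepiece vocabulary.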
79
1
'''simple docstring''' import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger() def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True ): """simple docstring""" print(f'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 1_28: if name[-1] == "S": SCREAMING_SNAKE_CASE : Tuple = timm.create_model("""levit_128s""" , pretrained=lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Optional[Any] = timm.create_model("""levit_128""" , pretrained=lowerCamelCase_ ) if hidden_sizes == 1_92: SCREAMING_SNAKE_CASE : Optional[Any] = timm.create_model("""levit_192""" , pretrained=lowerCamelCase_ ) if hidden_sizes == 2_56: SCREAMING_SNAKE_CASE : Optional[Any] = timm.create_model("""levit_256""" , pretrained=lowerCamelCase_ ) if hidden_sizes == 3_84: SCREAMING_SNAKE_CASE : Optional[Any] = timm.create_model("""levit_384""" , pretrained=lowerCamelCase_ ) from_model.eval() SCREAMING_SNAKE_CASE : Union[str, Any] = LevitForImageClassificationWithTeacher(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE : Tuple = OrderedDict() SCREAMING_SNAKE_CASE : Union[str, Any] = from_model.state_dict() SCREAMING_SNAKE_CASE : Optional[Any] = list(from_model.state_dict().keys() ) SCREAMING_SNAKE_CASE : Dict = list(our_model.state_dict().keys() ) print(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for i in range(len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : Optional[Any] = weights[og_keys[i]] our_model.load_state_dict(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = torch.randn((2, 3, 2_24, 2_24) ) SCREAMING_SNAKE_CASE : Optional[int] = from_model(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = our_model(lowerCamelCase_ ).logits assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ ), "The model logits don't match the original one." 
SCREAMING_SNAKE_CASE : Any = name print(lowerCamelCase_ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) SCREAMING_SNAKE_CASE : Optional[int] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(f'''Pushed {checkpoint_name}''' ) def __A ( lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = True ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = """imagenet-1k-id2label.json""" SCREAMING_SNAKE_CASE : Optional[Any] = 10_00 SCREAMING_SNAKE_CASE : Tuple = (1, num_labels) SCREAMING_SNAKE_CASE : List[Any] = """huggingface/label-files""" SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels SCREAMING_SNAKE_CASE : Tuple = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) SCREAMING_SNAKE_CASE : int = {int(lowerCamelCase_ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : List[Any] = idalabel SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in idalabel.items()} SCREAMING_SNAKE_CASE : Optional[Any] = partial(lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = { """levit-128S""": 1_28, """levit-128""": 1_28, """levit-192""": 1_92, """levit-256""": 2_56, """levit-384""": 3_84, } SCREAMING_SNAKE_CASE : str = { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , lowerCamelCase_ , names_to_config[model_name] , lowerCamelCase_ , lowerCamelCase_ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) return config, expected_shape if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) __UpperCAmelCase = parser.parse_args() __UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
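# Hedged invocation sketch (not part of the dataset row above); the script filename is hypothetical,
# but the flags correspond to the argparse arguments defined at the bottom of the file:
#   python convert_levit_checkpoint.py --model_name levit-256 \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
# Omitting --model_name converts every entry in names_to_config.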
79
'''simple docstring'''
from __future__ import annotations

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow


if is_tf_available():
    import numpy as np
    import tensorflow as tf

    from transformers import TFXLMRobertaModel


@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
    """simple docstring"""

    @slow
    def lowerCamelCase_ ( self : Tuple ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" )
        SCREAMING_SNAKE_CASE : Dict = {
            """input_ids""": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ),  # "My dog is cute"
            """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
        }
        SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )["""last_hidden_state"""]
        SCREAMING_SNAKE_CASE : Union[str, Any] = tf.TensorShape((1, 6, 7_68) )
        self.assertEqual(output.shape , lowerCamelCase_ )
        # compare the actual values for a slice.
        SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor(
            [
                [
                    [0.0_681_762, 0.10_894_451, 0.06_772_504],
                    [-0.06_423_668, 0.02_366_615, 0.04_329_344],
                    [-0.06_057_295, 0.09_974_135, -0.00_070_584],
                ]
            ] , dtype=tf.floataa , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
79
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} __UpperCAmelCase = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __UpperCAmelCase = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : List[Any]="<pad>" , lowerCamelCase_ : Optional[Any]="<mask>" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : str , 
lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(lowerCamelCase_ ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = """""" SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_ ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def __getstate__( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
79
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''Translation''' , init=lowercase_ , repr=lowercase_ ) def __call__( self : int ): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''TranslationVariableLanguages''' , init=lowercase_ , repr=lowercase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None SCREAMING_SNAKE_CASE : str = len(self.languages ) if self.languages else None def __call__( self : Tuple ): '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = set(self.languages ) if self.languages and set(lowerCamelCase_ ) - lang_set: raise ValueError( f'''Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. SCREAMING_SNAKE_CASE : List[Any] = [] for lang, text in translation_dict.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = zip(*sorted(lowerCamelCase_ ) ) return {"language": languages, "translation": translations} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
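# Hedged usage sketch (not part of the dataset row above), assuming these dataclasses mirror
# datasets.features.Translation / TranslationVariableLanguages (field names here are obfuscated):
from datasets import Dataset, Features, Translation

features = Features({"translation": Translation(languages=["en", "fr"])})
ds = Dataset.from_dict(
    {"translation": [{"en": "the cat", "fr": "le chat"}]},
    features=features,
)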
79
1
'''simple docstring''' import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = """pytorch_model.bin""" @dataclasses.dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = dataclasses.field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models.'''} ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co.'''} , ) @dataclasses.dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the training data.'''} ) SCREAMING_SNAKE_CASE__ = dataclasses.field(metadata={'''help''': '''A csv or a json file containing the data to predict on.'''} ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''A csv or a json file containing the validation data.'''} ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''The name of the task to train on.'''} , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''The list of labels for the task.'''} ) @dataclasses.dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = dataclasses.field( metadata={'''help''': '''The output directory where the model predictions and checkpoints will be written.'''} ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default='''accuracy''' , metadata={'''help''': '''The evaluation metric used for the task.'''} ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default='''no''' , metadata={ '''help''': '''The evaluation strategy to adopt during training. 
Possible values are: ["no", "step", "epoch]''' } , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=10 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=0.0 , metadata={ '''help''': '''How much the specified evaluation metric must improve to satisfy early stopping conditions.''' } , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the confidence score.'''} , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''Whether to filter the pseudo-labeled data based on the validation performance.'''} , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''Whether to fine-tune on labeled data after pseudo training.'''} , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=0.0 , metadata={'''help''': '''Confidence threshold for pseudo-labeled data filtering.'''} , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=100 , metadata={'''help''': '''Number of evaluation calls with no improvement after which training will be stopped.'''} , ) SCREAMING_SNAKE_CASE__ = dataclasses.field( default=lowercase_ , metadata={'''help''': '''Random seed for initialization.'''} , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 ) if args.do_filter_by_confidence: SCREAMING_SNAKE_CASE : str = dataset.filter(lambda lowerCamelCase_ : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 SCREAMING_SNAKE_CASE : Optional[Any] = int(eval_result * len(lowerCamelCase_ ) ) print(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = dataset.sort("""probability""" , reverse=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = dataset.select(range(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Dict = dataset.remove_columns(["""label""", """probability"""] ) SCREAMING_SNAKE_CASE : Dict = dataset.rename_column("""prediction""" , """label""" ) SCREAMING_SNAKE_CASE : str = dataset.map(lambda lowerCamelCase_ : {"label": idalabel[example["label"]]} ) SCREAMING_SNAKE_CASE : Tuple = dataset.shuffle(seed=args.seed ) SCREAMING_SNAKE_CASE : List[str] = os.path.join(lowerCamelCase_ , f'''train_pseudo.{args.data_file_extension}''' ) if args.data_file_extension == "csv": dataset.to_csv(lowerCamelCase_ , index=lowerCamelCase_ ) else: dataset.to_json(lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , ) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() SCREAMING_SNAKE_CASE : Any = STModelArguments(model_name_or_path=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = STDataArguments(train_file=lowerCamelCase_ , infer_file=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = STTrainingArguments(output_dir=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(lowerCamelCase_ ).items(): setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for key, value in kwargs.items(): if hasattr(lowerCamelCase_ , lowerCamelCase_ ): setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Sanity checks SCREAMING_SNAKE_CASE : Optional[Any] = {} SCREAMING_SNAKE_CASE : str = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None SCREAMING_SNAKE_CASE : List[str] = args.train_file SCREAMING_SNAKE_CASE : int = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None SCREAMING_SNAKE_CASE : List[str] = args.eval_file for key in data_files: SCREAMING_SNAKE_CASE : Optional[Any] = data_files[key].split(""".""" )[-1] assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.''' if args.data_file_extension is None: SCREAMING_SNAKE_CASE : Union[str, Any] = extension else: assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.''' assert ( args.eval_metric in datasets.list_metrics() ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.''' # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed ) logger.info("""Creating the initial data directory for self-training...""" ) SCREAMING_SNAKE_CASE : Optional[Any] = f'''{args.output_dir}/self-train_iter-{{}}'''.format SCREAMING_SNAKE_CASE : Dict = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ ) os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) accelerator.wait_for_everyone() SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : str = None SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Dict = False # Show the progress bar SCREAMING_SNAKE_CASE : Dict = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 , int(args.max_selftrain_iterations ) ): SCREAMING_SNAKE_CASE : str = data_dir_format(lowerCamelCase_ ) assert os.path.exists(lowerCamelCase_ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , """stage-1""" ) SCREAMING_SNAKE_CASE : Tuple = { """accelerator""": accelerator, """model_name_or_path""": args.model_name_or_path, """cache_dir""": args.cache_dir, """do_train""": True, """train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""], """do_eval""": True if args.eval_file is not None else False, """eval_file""": data_files["""eval"""], """do_predict""": True, """infer_file""": data_files["""infer"""], """task_name""": args.task_name, """label_list""": args.label_list, """output_dir""": current_output_dir, """eval_metric""": args.eval_metric, """evaluation_strategy""": args.evaluation_strategy, """early_stopping_patience""": args.early_stopping_patience, """early_stopping_threshold""": args.early_stopping_threshold, """seed""": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(lowerCamelCase_ , lowerCamelCase_ ): arguments_dict.update({key: value} ) SCREAMING_SNAKE_CASE : Dict = os.path.join(lowerCamelCase_ , """best-checkpoint""" , lowerCamelCase_ ) if os.path.exists(lowerCamelCase_ ): logger.info( """Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , lowerCamelCase_ , lowerCamelCase_ , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , lowerCamelCase_ ) finetune(**lowerCamelCase_ ) accelerator.wait_for_everyone() assert os.path.exists(lowerCamelCase_ ) logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , lowerCamelCase_ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(lowerCamelCase_ , """best-checkpoint""" ) SCREAMING_SNAKE_CASE : Dict = os.path.join(lowerCamelCase_ , """stage-2""" ) # Update arguments_dict SCREAMING_SNAKE_CASE : Optional[Any] = model_path SCREAMING_SNAKE_CASE : Any = data_files["""train"""] SCREAMING_SNAKE_CASE : int = current_output_dir SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , """best-checkpoint""" , lowerCamelCase_ ) if os.path.exists(lowerCamelCase_ ): logger.info( """Found existing model checkpoint at %s. 
Skipping self-training: iteration: %d, stage: 2.""" , lowerCamelCase_ , lowerCamelCase_ , ) else: logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , lowerCamelCase_ ) finetune(**lowerCamelCase_ ) accelerator.wait_for_everyone() assert os.path.exists(lowerCamelCase_ ) logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = iteration SCREAMING_SNAKE_CASE : List[str] = data_dir_format(iteration + 1 ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(os.path.join(lowerCamelCase_ , """best-checkpoint""" ) ) SCREAMING_SNAKE_CASE : Optional[int] = config.idalabel SCREAMING_SNAKE_CASE : List[str] = os.path.join(lowerCamelCase_ , """eval_results_best-checkpoint.json""" ) SCREAMING_SNAKE_CASE : Any = os.path.join(lowerCamelCase_ , """test_results_best-checkpoint.json""" ) assert os.path.exists(lowerCamelCase_ ) with open(lowerCamelCase_ , """r""" ) as f: SCREAMING_SNAKE_CASE : Optional[Any] = float(json.load(lowerCamelCase_ )[args.eval_metric] ) SCREAMING_SNAKE_CASE : Dict = os.path.join(lowerCamelCase_ , """infer_output_best-checkpoint.csv""" ) assert os.path.exists(lowerCamelCase_ ) # Loading the dataset from local csv or json files. SCREAMING_SNAKE_CASE : Optional[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""] SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""] if accelerator.is_main_process: os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ ) shutil.copy(lowerCamelCase_ , os.path.join(lowerCamelCase_ , f'''eval_results_iter-{iteration}.json''' ) ) if os.path.exists(lowerCamelCase_ ): shutil.copy(lowerCamelCase_ , os.path.join(lowerCamelCase_ , f'''test_results_iter-{iteration}.json''' ) ) create_pseudo_labeled_data(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) accelerator.wait_for_everyone() SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , f'''train_pseudo.{args.data_file_extension}''' ) if args.evaluation_strategy != IntervalStrategy.NO.value: SCREAMING_SNAKE_CASE : List[Any] = eval_result if best_iteration is None: SCREAMING_SNAKE_CASE : Union[str, Any] = new_iteration SCREAMING_SNAKE_CASE : int = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: SCREAMING_SNAKE_CASE : Union[str, Any] = new_iteration SCREAMING_SNAKE_CASE : Optional[int] = new_eval_result SCREAMING_SNAKE_CASE : str = 0 else: if new_eval_result == best_eval_result: SCREAMING_SNAKE_CASE : Tuple = new_iteration SCREAMING_SNAKE_CASE : Optional[int] = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: SCREAMING_SNAKE_CASE : str = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("""Best iteration: %d""" , lowerCamelCase_ ) logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , lowerCamelCase_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowerCamelCase_ , f'''eval_results_iter-{iteration}.json''' ) , os.path.join(lowerCamelCase_ , """eval_results_best-iteration.json""" ) , ) else: # Assume that the last iteration is the best logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 ) logger.info("""Best evaluation result: %s = %f""" , 
args.eval_metric , lowerCamelCase_ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowerCamelCase_ , f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(lowerCamelCase_ , """eval_results_best-iteration.json""" ) , )
79
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


__UpperCAmelCase = logging.get_logger(__name__)


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    def __init__( self : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ):
        '''simple docstring'''
        warnings.warn(
            """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use FlavaImageProcessor instead.""" , lowerCamelCase_ , )
        super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
79
1
'''simple docstring'''
def __A ( lowerCamelCase_ ):
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))


def __A ( lowerCamelCase_ ):
    """simple docstring"""
    SCREAMING_SNAKE_CASE : Optional[Any] = 0
    SCREAMING_SNAKE_CASE : List[str] = number
    while duplicate > 0:
        SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = divmod(lowerCamelCase_ , 10 )
        fact_sum += factorial(lowerCamelCase_ )
    return fact_sum == number


if __name__ == "__main__":
    print("""Program to check whether a number is a Krishnamurthy Number or not.""")
    __UpperCAmelCase = int(input("""Enter number: """).strip())
    print(
        f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.'''
    )
79
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase_ ( cls : Any ): '''simple docstring''' return f'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''optuna''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Dict ): '''simple docstring''' return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any ): '''simple docstring''' return default_hp_space_optuna(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''ray''' SCREAMING_SNAKE_CASE__ = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_ray_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' return default_hp_space_ray(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''sigopt''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return default_hp_space_sigopt(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''wandb''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return default_hp_space_wandb(lowerCamelCase_ ) __UpperCAmelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowerCamelCase_ ) > 0: SCREAMING_SNAKE_CASE : List[Any] = available_backends[0].name if len(lowerCamelCase_ ) > 1: logger.info( f'''{len(lowerCamelCase_ )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
79
1
'''simple docstring'''
import requests


__UpperCAmelCase = """"""  # <-- Put your OpenWeatherMap appid here!
__UpperCAmelCase = """https://api.openweathermap.org/data/2.5/"""


def __A ( lowerCamelCase_ = "Chicago" , lowerCamelCase_ = APPID ):
    """simple docstring"""
    return requests.get(URL_BASE + """weather""" , params=locals() ).json()


def __A ( lowerCamelCase_ = "Kolkata, India" , lowerCamelCase_ = APPID ):
    """simple docstring"""
    return requests.get(URL_BASE + """forecast""" , params=locals() ).json()


def __A ( lowerCamelCase_ = 55.68 , lowerCamelCase_ = 12.57 , lowerCamelCase_ = APPID ):
    """simple docstring"""
    return requests.get(URL_BASE + """onecall""" , params=locals() ).json()


if __name__ == "__main__":
    from pprint import pprint

    while True:
        __UpperCAmelCase = input("""Enter a location:""").strip()
        if location:
            pprint(current_weather(location))
        else:
            break
79
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_dataset(lowerCamelCase_ , lowerCamelCase_ ) print("""Processing...""" ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = update_image_and_anno(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for index, image in enumerate(lowerCamelCase_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE : Optional[int] = random_chars(32 ) SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] SCREAMING_SNAKE_CASE : Dict = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for anno in new_annos[index]: SCREAMING_SNAKE_CASE : Optional[Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(lowerCamelCase_ ) with open(f'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Any = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ , """*.txt""" ) ): SCREAMING_SNAKE_CASE : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(lowerCamelCase_ ) as in_file: SCREAMING_SNAKE_CASE : Any = in_file.readlines() SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , f'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE : Tuple = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE : Union[str, Any] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowerCamelCase_ ) labels.append(lowerCamelCase_ ) return img_paths, labels def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Dict = img_list[idx] path_list.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = anno_list[idx] SCREAMING_SNAKE_CASE : Optional[Any] = cva.imread(lowerCamelCase_ ) if flip_type == 1: SCREAMING_SNAKE_CASE : List[str] = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: SCREAMING_SNAKE_CASE : Any = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : Optional[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowerCamelCase_ ) new_imgs_list.append(lowerCamelCase_ ) return new_imgs_list, new_annos_lists, path_list def __A ( lowerCamelCase_ = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE : Dict = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_ ) for _ in 
range(lowerCamelCase_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
79
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} __UpperCAmelCase = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } __UpperCAmelCase = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } __UpperCAmelCase = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, """junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ = RoFormerTokenizer def __init__( self : Any , lowerCamelCase_ : Dict=None , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Optional[Any]="[UNK]" , lowerCamelCase_ : int="[SEP]" , lowerCamelCase_ : int="[PAD]" , lowerCamelCase_ : Dict="[CLS]" , lowerCamelCase_ : List[str]="[MASK]" , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=None , **lowerCamelCase_ : int , ): '''simple docstring''' super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , do_lower_case=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , tokenize_chinese_chars=lowerCamelCase_ , strip_accents=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( pre_tok_state.get("""lowercase""" , lowerCamelCase_ ) != do_lower_case or pre_tok_state.get("""strip_accents""" , lowerCamelCase_ ) != strip_accents ): SCREAMING_SNAKE_CASE : Tuple = getattr(lowerCamelCase_ , pre_tok_state.pop("""type""" ) ) SCREAMING_SNAKE_CASE : Optional[Any] = 
do_lower_case SCREAMING_SNAKE_CASE : Any = strip_accents SCREAMING_SNAKE_CASE : List[Any] = pre_tok_class(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = do_lower_case def __getstate__( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : str = BertPreTokenizer() return state def __setstate__( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = d SCREAMING_SNAKE_CASE : Union[str, Any] = self.__dict__["""_tokenizer"""].get_vocab() SCREAMING_SNAKE_CASE : Tuple = PreTokenizer.custom(JiebaPreTokenizer(lowerCamelCase_ ) ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Tuple=None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ ) return tuple(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=False , **lowerCamelCase_ : Union[str, Any] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = BertPreTokenizer() return super().save_pretrained(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
79
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    """google/vivit-b-16x2-kinetics400""": (
        """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class UpperCamelCase__ ( lowercase_ ):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = '''vivit'''

    def __init__( self : Tuple , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=[2, 16, 16] , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : List[str]="gelu_fast" , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : List[str] = hidden_size
        SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
        SCREAMING_SNAKE_CASE : str = intermediate_size
        SCREAMING_SNAKE_CASE : List[Any] = hidden_act
        SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE : Tuple = initializer_range
        SCREAMING_SNAKE_CASE : str = layer_norm_eps
        SCREAMING_SNAKE_CASE : str = image_size
        SCREAMING_SNAKE_CASE : Dict = num_frames
        SCREAMING_SNAKE_CASE : Optional[Any] = tubelet_size
        SCREAMING_SNAKE_CASE : Dict = num_channels
        SCREAMING_SNAKE_CASE : int = qkv_bias
        super().__init__(**lowerCamelCase_ )
79
1
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[Any] = global_rng SCREAMING_SNAKE_CASE : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : List[str] = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : int = spectrogram_length SCREAMING_SNAKE_CASE : List[Any] = feature_size SCREAMING_SNAKE_CASE : Any = num_audio_channels SCREAMING_SNAKE_CASE : Tuple = hop_length SCREAMING_SNAKE_CASE : str = chunk_length SCREAMING_SNAKE_CASE : Dict = sampling_rate def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : Dict ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE : List[str] = feature_extractor( lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) 
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor() SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
79
'''simple docstring'''
import math


class UpperCamelCase__ :
    """simple docstring"""

    def __init__( self : List[str] , lowerCamelCase_ : Tuple=0 ):  # a graph with Node 0,1,...,N-1
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Any = n
        SCREAMING_SNAKE_CASE : Optional[int] = [
            [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
        ]  # adjacency matrix for weight
        SCREAMING_SNAKE_CASE : Union[str, Any] = [
            [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ )
        ]  # dp[i][j] stores minimum distance from i to j

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ):
        '''simple docstring'''
        SCREAMING_SNAKE_CASE : Union[str, Any] = w

    def lowerCamelCase_ ( self : Optional[int] ):
        '''simple docstring'''
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )

    def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ):
        '''simple docstring'''
        return self.dp[u][v]


if __name__ == "__main__":
    __UpperCAmelCase = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
79
1
'''simple docstring''' from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version(""">=""", """0.0.12""") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not 
(is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
79
'''simple docstring'''
import math


def __A ( lowerCamelCase_ ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def __A ( lowerCamelCase_ = 1_00_01 ):
    """simple docstring"""
    try:
        SCREAMING_SNAKE_CASE : Tuple = int(lowerCamelCase_ )
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    SCREAMING_SNAKE_CASE : list[int] = []
    SCREAMING_SNAKE_CASE : Dict = 2
    while len(lowerCamelCase_ ) < nth:
        if is_prime(lowerCamelCase_ ):
            primes.append(lowerCamelCase_ )
            num += 1
        else:
            num += 1
    return primes[len(lowerCamelCase_ ) - 1]


if __name__ == "__main__":
    print(f'''{solution() = }''')
79
1
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


__UpperCAmelCase = {
    """configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        """GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """GPTBigCodeForSequenceClassification""",
        """GPTBigCodeForTokenClassification""",
        """GPTBigCodeForCausalLM""",
        """GPTBigCodeModel""",
        """GPTBigCodePreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
79
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = script.contents[0] SCREAMING_SNAKE_CASE : int = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = f'''https://www.instagram.com/{username}/''' SCREAMING_SNAKE_CASE : Any = self.get_json() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=lowerCamelCase_ ).text SCREAMING_SNAKE_CASE : List[Any] = BeautifulSoup(lowerCamelCase_ , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.user_data["is_private"] def __A ( lowerCamelCase_ = "github" ): """simple docstring""" import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowerCamelCase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
79
1
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __UpperCAmelCase = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def __A ( lowerCamelCase_=True ): """simple docstring""" if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=lowercase_ ) ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' with TemporaryDirectory() as tmp_dir: SCREAMING_SNAKE_CASE : Optional[Any] = dataset_module_factory(lowerCamelCase_ , cache_dir=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = import_main_class(dataset_module.module_path , dataset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : DatasetBuilder = builder_cls( cache_dir=lowerCamelCase_ , config_name=lowerCamelCase_ , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE : Union[str, Any] = """/""".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=lowerCamelCase_ ).replace(os.sep , """/""" ), config.DATASET_INFO_FILENAME, ] ) SCREAMING_SNAKE_CASE : Optional[int] = cached_path(lowerCamelCase_ , cache_dir=lowerCamelCase_ ) self.assertTrue(os.path.exists(lowerCamelCase_ ) ) @pytest.mark.integration def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp("""test_hf_gcp""" ) / """test_wikipedia_simple""" SCREAMING_SNAKE_CASE : Any = dataset_module_factory("""wikipedia""" , cache_dir=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = import_main_class(dataset_module.module_path ) SCREAMING_SNAKE_CASE : DatasetBuilder = builder_cls( cache_dir=lowerCamelCase_ , config_name="""20220301.frr""" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam SCREAMING_SNAKE_CASE : str = None 
builder_instance.download_and_prepare() SCREAMING_SNAKE_CASE : str = builder_instance.as_dataset() assert ds @pytest.mark.integration def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = dataset_module_factory("""wikipedia""" , cache_dir=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = import_main_class(dataset_module.module_path , dataset=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : DatasetBuilder = builder_cls( cache_dir=lowerCamelCase_ , config_name="""20220301.frr""" , hash=dataset_module.hash , ) SCREAMING_SNAKE_CASE : Any = builder_instance.as_streaming_dataset() assert ds assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) assert "train" in ds assert isinstance(ds["""train"""] , lowerCamelCase_ ) assert next(iter(ds["""train"""] ) )
79
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = """Hello world! cécé herlolip""" __UpperCAmelCase = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = BertAbsConfig( temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage ) SCREAMING_SNAKE_CASE : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ ) original.eval() SCREAMING_SNAKE_CASE : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE : Optional[int] = encoder_input_ids SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : Optional[Any] = original.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = new_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : str = new_model.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference between weights: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference between weights: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) __UpperCAmelCase = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
79
1
'''simple docstring''' import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __UpperCAmelCase = 256047 __UpperCAmelCase = 256145 @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = NllbTokenizer SCREAMING_SNAKE_CASE__ = NllbTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {} def lowerCamelCase_ ( self : int ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE : Union[str, Any] = NllbTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = NllbTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase_ ) self.assertListEqual( lowerCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = 
tempfile.mkdtemp() SCREAMING_SNAKE_CASE : List[str] = tokenizer_r.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) SCREAMING_SNAKE_CASE : Any = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : Dict = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=True SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(lowerCamelCase_ , lowerCamelCase_ ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : str = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) # Save tokenizer rust, legacy_format=False SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : Dict = tokenizer_r.save_pretrained(lowerCamelCase_ , legacy_format=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer_p.save_pretrained(lowerCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way SCREAMING_SNAKE_CASE : int = tokenizer_r.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_p.from_pretrained(lowerCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCamelCase_ , lowerCamelCase_ ) ) shutil.rmtree(lowerCamelCase_ ) @require_torch def lowerCamelCase_ ( self : Dict ): '''simple docstring''' if not self.test_seqaseq: return SCREAMING_SNAKE_CASE : int = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. 
SCREAMING_SNAKE_CASE : Optional[int] = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] SCREAMING_SNAKE_CASE : Tuple = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 10 ) # max_target_length will default to max_length if not specified SCREAMING_SNAKE_CASE : List[str] = tokenizer.prepare_seqaseq_batch( lowerCamelCase_ , tgt_texts=lowerCamelCase_ , max_length=3 , return_tensors="""pt""" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.prepare_seqaseq_batch( src_texts=lowerCamelCase_ , max_length=3 , max_target_length=10 , return_tensors="""pt""" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("""decoder_input_ids""" , lowerCamelCase_ ) @unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" ) def lowerCamelCase_ ( self : str ): '''simple docstring''' pass def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : int = [AddedToken("""<special>""" , lstrip=lowerCamelCase_ )] SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode("""Hey this is a <special> token""" ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode("""<special>""" , add_special_tokens=lowerCamelCase_ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: SCREAMING_SNAKE_CASE : Dict = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer_class.from_pretrained( lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_p.encode("""Hey this is a <special> token""" ) SCREAMING_SNAKE_CASE : Any = tokenizer_cr.encode("""Hey this is a <special> token""" ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" 
SCREAMING_SNAKE_CASE__ = '''facebook/nllb-200-distilled-600M''' SCREAMING_SNAKE_CASE__ = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] SCREAMING_SNAKE_CASE__ = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] SCREAMING_SNAKE_CASE__ = [ 25_6047, 1_6297, 13_4408, 8165, 24_8066, 1_4734, 950, 1135, 10_5721, 3573, 83, 2_7352, 108, 4_9486, 2, ] @classmethod def lowerCamelCase_ ( cls : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : NllbTokenizer = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""eng_Latn""" , tgt_lang="""ron_Latn""" ) SCREAMING_SNAKE_CASE : List[Any] = 1 return cls def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] , 25_60_01 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] , 25_60_02 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] , 25_60_57 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' self.assertIn(lowerCamelCase_ , self.tokenizer.all_special_ids ) # fmt: off SCREAMING_SNAKE_CASE : Tuple = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47] # fmt: on SCREAMING_SNAKE_CASE : Dict = self.tokenizer.decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = 10 SCREAMING_SNAKE_CASE : int = self.tokenizer(lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , lowerCamelCase_ ) self.assertEqual(len(lowerCamelCase_ ) , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [25_62_03, 3] ) def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = tempfile.mkdtemp() SCREAMING_SNAKE_CASE : str = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = NllbTokenizer.from_pretrained(lowerCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCamelCase_ ) @require_torch def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict 
= self.tokenizer( self.src_text , text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right( batch["""labels"""] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["""ron_Latn"""] ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertEqual((2, 15) , batch.input_ids.shape ) self.assertEqual((2, 15) , batch.attention_mask.shape ) SCREAMING_SNAKE_CASE : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer(self.src_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=3 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer( text_target=self.tgt_text , padding=lowerCamelCase_ , truncation=lowerCamelCase_ , max_length=10 , return_tensors="""pt""" ) SCREAMING_SNAKE_CASE : str = targets["""input_ids"""] SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right( lowerCamelCase_ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( nested_simplify(lowerCamelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[25_60_47, 70, 73_56, 2]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 25_60_57, } , ) @require_torch def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = True SCREAMING_SNAKE_CASE : Dict = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] ) SCREAMING_SNAKE_CASE : List[str] = False SCREAMING_SNAKE_CASE : int = self.tokenizer( """UN Chief says there is no military solution in Syria""" , src_lang="""eng_Latn""" , tgt_lang="""fra_Latn""" ) self.assertEqual( inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
79
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} SCREAMING_SNAKE_CASE : Optional[Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : int = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]="train" , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Union[str, Any]="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" ) SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : int = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : List[str] = tokenizer SCREAMING_SNAKE_CASE : Dict = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : List[Any] = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang def __len__( self : List[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Dict = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Dict = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Any = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Optional[int] = encode_line(lowerCamelCase_ , lowerCamelCase_ 
, self.max_source_length , """right""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , """right""" ) SCREAMING_SNAKE_CASE : Tuple = source_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : Tuple = target_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : List[str] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : Dict ): '''simple docstring''' return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.stack([x["""input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""attention_mask"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""decoder_input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Dict = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" def remove_articles(lowerCamelCase_ ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def __A ( lowerCamelCase_ , 
lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Optional[int] = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Tuple = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = (2 * precision * recall) / (precision + recall) return fa def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def __A ( lowerCamelCase_ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Dict = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue SCREAMING_SNAKE_CASE : Dict = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
79
1
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__UpperCAmelCase = logging.get_logger(__name__)

__UpperCAmelCase = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class UpperCamelCase__(PretrainedConfig):
    """simple docstring"""

    SCREAMING_SNAKE_CASE__ = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
79
'''simple docstring'''


def factorial(digit: int) -> int:
    """Return the factorial of a single decimal digit."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A Krishnamurthy number equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.")
79
1
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCAmelCase = { """configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""VivitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """VivitModel""", """VivitPreTrainedModel""", """VivitForVideoClassification""", ] if TYPE_CHECKING: from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_vivit import VivitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vivit import ( VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST, VivitForVideoClassification, VivitModel, VivitPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
79
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class UpperCamelCase__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : str=None , **lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(features=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column: if all( isinstance(lowerCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , (str, bytes, type(lowerCamelCase_ )) ): return value elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() SCREAMING_SNAKE_CASE : str = {} if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): SCREAMING_SNAKE_CASE : Any = {"""dtype""": torch.intaa} elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): SCREAMING_SNAKE_CASE : int = {"""dtype""": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[Any] = np.asarray(lowerCamelCase_ ) return torch.tensor(lowerCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' import torch # support for torch, tf, jax etc. 
if hasattr(lowerCamelCase_ , """__array__""" ) and not isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCamelCase_ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) elif isinstance(lowerCamelCase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) return self._tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : dict ): '''simple docstring''' return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_row(lowerCamelCase_ ) return self.recursive_tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.python_features_decoder.decode_column(lowerCamelCase_ , pa_table.column_names[0] ) SCREAMING_SNAKE_CASE : List[str] = self.recursive_tensorize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self._consolidate(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.recursive_tensorize(lowerCamelCase_ ) for column_name in batch: SCREAMING_SNAKE_CASE : Tuple = self._consolidate(batch[column_name] ) return batch
79
1
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __A ( lowerCamelCase_ ): """simple docstring""" return getitem, k def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return setitem, k, v def __A ( lowerCamelCase_ ): """simple docstring""" return delitem, k def __A ( lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ): """simple docstring""" try: return fun(lowerCamelCase_ , *lowerCamelCase_ ), None except Exception as e: return None, e __UpperCAmelCase = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) __UpperCAmelCase = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] __UpperCAmelCase = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] __UpperCAmelCase = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] __UpperCAmelCase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] __UpperCAmelCase = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = HashMap(initial_block_size=4 ) SCREAMING_SNAKE_CASE : Optional[Any] = {} for _, (fun, *args) in enumerate(lowerCamelCase_ ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = _run_operation(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = _run_operation(lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) assert my_res == py_res assert str(lowerCamelCase_ ) == str(lowerCamelCase_ ) assert set(lowerCamelCase_ ) == set(lowerCamelCase_ ) assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) assert set(my.items() ) == set(py.items() ) def __A ( ): """simple docstring""" def is_public(lowerCamelCase_ ) -> bool: return not name.startswith("""_""" ) SCREAMING_SNAKE_CASE : Tuple = {name for name in dir({} ) if is_public(lowerCamelCase_ )} SCREAMING_SNAKE_CASE : List[Any] = {name for name in dir(HashMap() ) if is_public(lowerCamelCase_ )} assert dict_public_names > hash_public_names
79
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[Any] = global_rng SCREAMING_SNAKE_CASE : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : List[str] = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : int = spectrogram_length SCREAMING_SNAKE_CASE : List[Any] = feature_size SCREAMING_SNAKE_CASE : Any = num_audio_channels SCREAMING_SNAKE_CASE : Tuple = hop_length SCREAMING_SNAKE_CASE : str = chunk_length SCREAMING_SNAKE_CASE : Dict = sampling_rate def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : Dict ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE : List[str] = feature_extractor( lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) 
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor() SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
79
1
'''simple docstring'''


def solution(n: int = 1000) -> int:
    """Return the sum of every natural number below n that is a multiple of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # unreachable: any multiple of 15 already matches the branch above
            result -= a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
79
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__UpperCAmelCase = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = ["MvpTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __UpperCAmelCase = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    __UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
79
1
'''simple docstring'''

import argparse
import json

from tqdm import tqdm


def main():
    """Parse raw DPR training data into an evaluation set and a gold data file."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
79
'''simple docstring'''

__UpperCAmelCase = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]

from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
79
1
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase__ : """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict=13 , lowerCamelCase_ : Union[str, Any]=30 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : str=3 , lowerCamelCase_ : Optional[int]=True , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Any=4 , lowerCamelCase_ : Dict=37 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Any=10 , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : int=3 , lowerCamelCase_ : Optional[int]=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = parent SCREAMING_SNAKE_CASE : int = batch_size SCREAMING_SNAKE_CASE : Optional[Any] = image_size SCREAMING_SNAKE_CASE : int = patch_size SCREAMING_SNAKE_CASE : Tuple = num_channels SCREAMING_SNAKE_CASE : List[Any] = is_training SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE : str = hidden_size SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads SCREAMING_SNAKE_CASE : List[Any] = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : int = type_sequence_label_size SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) SCREAMING_SNAKE_CASE : Tuple = (image_size // patch_size) ** 2 SCREAMING_SNAKE_CASE : Union[str, Any] = num_patches + 1 def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : List[str] = None if self.use_labels: SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Optional[int] = self.get_config() return config, pixel_values, labels def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' 
SCREAMING_SNAKE_CASE : int = TFViTModel(config=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. SCREAMING_SNAKE_CASE : Optional[Any] = self.image_size // 2 SCREAMING_SNAKE_CASE : Dict = pixel_values[:, :, :image_size, :image_size] SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ , training=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.type_sequence_label_size SCREAMING_SNAKE_CASE : Any = TFViTForImageClassification(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ , labels=lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. SCREAMING_SNAKE_CASE : Optional[Any] = self.image_size // 2 SCREAMING_SNAKE_CASE : Any = pixel_values[:, :, :image_size, :image_size] SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ , interpolate_pos_encoding=lowerCamelCase_ , training=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images SCREAMING_SNAKE_CASE : Union[str, Any] = 1 SCREAMING_SNAKE_CASE : str = TFViTForImageClassification(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs SCREAMING_SNAKE_CASE : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () SCREAMING_SNAKE_CASE__ = ( {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification} if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = TFViTModelTester(self ) SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowerCamelCase_ ( self : int ): '''simple docstring''' pass def 
lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) SCREAMING_SNAKE_CASE : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase_ , tf.keras.layers.Layer ) ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[str] = model_class(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @slow def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ) SCREAMING_SNAKE_CASE : str = self.default_image_processor SCREAMING_SNAKE_CASE : int = prepare_img() SCREAMING_SNAKE_CASE : Tuple = image_processor(images=lowerCamelCase_ , return_tensors="""tf""" ) # forward pass SCREAMING_SNAKE_CASE : int = model(**lowerCamelCase_ ) # verify the logits SCREAMING_SNAKE_CASE : Any = tf.TensorShape((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = tf.constant([-0.2_744, 0.8_215, -0.0_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 )
79
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Dict = latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE : Any = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. 
SCREAMING_SNAKE_CASE : str = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCamelCase_ ( self : str ): '''simple docstring''' if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase_ ) def __call__( self : Optional[Any] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCamelCase_ : torch.FloatTensor , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 5_12 , lowerCamelCase_ : int = 1_00 , lowerCamelCase_ : float = 4.0 , lowerCamelCase_ : int = 1 , lowerCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase_ : Optional[torch.FloatTensor] = None , lowerCamelCase_ : Optional[str] = "pil" , lowerCamelCase_ : bool = True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._execution_device SCREAMING_SNAKE_CASE : Optional[int] = guidance_scale > 1.0 if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Dict = torch.cat(lowerCamelCase_ , dim=0 ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Any = image_embeds.shape[0] * num_images_per_prompt if do_classifier_free_guidance: SCREAMING_SNAKE_CASE : List[Any] = image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Optional[int] = negative_image_embeds.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : Dict = hint.repeat_interleave(lowerCamelCase_ , dim=0 ) SCREAMING_SNAKE_CASE : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=lowerCamelCase_ ) self.scheduler.set_timesteps(lowerCamelCase_ , device=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.timesteps SCREAMING_SNAKE_CASE : Any = self.movq.config.latent_channels SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = downscale_height_and_width(lowerCamelCase_ , lowerCamelCase_ , self.movq_scale_factor ) # create initial latent SCREAMING_SNAKE_CASE : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(lowerCamelCase_ ) ): # expand the latents if we are doing classifier free guidance SCREAMING_SNAKE_CASE : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents SCREAMING_SNAKE_CASE : Union[str, Any] = {"""image_embeds""": image_embeds, """hint""": hint} SCREAMING_SNAKE_CASE : Dict = self.unet( sample=lowerCamelCase_ , timestep=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , added_cond_kwargs=lowerCamelCase_ , return_dict=lowerCamelCase_ , )[0] if do_classifier_free_guidance: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = noise_pred.split(latents.shape[1] , 
dim=1 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.chunk(2 ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = variance_pred.chunk(2 ) SCREAMING_SNAKE_CASE : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) SCREAMING_SNAKE_CASE : str = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 SCREAMING_SNAKE_CASE : str = self.scheduler.step( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , generator=lowerCamelCase_ , )[0] # post-processing SCREAMING_SNAKE_CASE : List[str] = self.movq.decode(lowerCamelCase_ , force_not_quantize=lowerCamelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: SCREAMING_SNAKE_CASE : Optional[int] = image * 0.5 + 0.5 SCREAMING_SNAKE_CASE : List[Any] = image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": SCREAMING_SNAKE_CASE : Any = self.numpy_to_pil(lowerCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase_ )
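# A minimal, self-contained sketch of the rounding helper defined above (named
# `__A` after obfuscation): the MoVQ decoder upscales by `scale_factor` (8 by
# default), so the requested pixel size is rounded up to a whole number of
# scale_factor**2 blocks. Names below are illustrative, not the library's API.
def downscale_height_and_width_sketch(height: int, width: int, scale_factor: int = 8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1  # round up so nothing is cropped
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# 768x768 pixels map to a 96x96 latent grid with the default factor of 8
assert downscale_height_and_width_sketch(768, 768) == (96, 96)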
'''simple docstring''' import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """vocab.json"""} __UpperCAmelCase = { """vocab_file""": { """mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""", } } __UpperCAmelCase = {"""mgp-str""": 27} class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[Any] , lowerCamelCase_ : int , lowerCamelCase_ : List[Any]="[GO]" , lowerCamelCase_ : List[str]="[GO]" , lowerCamelCase_ : Tuple="[s]" , lowerCamelCase_ : Any="[GO]" , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' super().__init__( unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , **lowerCamelCase_ , ) with open(lowerCamelCase_ , encoding="""utf-8""" ) as vocab_handle: SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = {v: k for k, v in self.vocab.items()} @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return len(self.vocab ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return dict(self.vocab , **self.added_tokens_encoder ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = [] for s in text: char_tokens.extend(lowerCamelCase_ ) return char_tokens def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Dict ): '''simple docstring''' return self.vocab.get(lowerCamelCase_ , self.vocab.get(self.unk_token ) ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return self.decoder.get(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error("""Vocabulary path ({}) should be a directory""".format(lowerCamelCase_ ) ) return SCREAMING_SNAKE_CASE : List[str] = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(lowerCamelCase_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + """\n""" ) return (vocab_file,)
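# Hedged illustration of the per-character lookup performed by the tokenizer
# above: every character becomes its own token and anything missing from the
# vocabulary falls back to the "[GO]" entry. The tiny vocab is invented for the
# example and is not the real MGP-STR vocabulary.
toy_vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}


def char_tokenize(text):
    return [toy_vocab.get(ch, toy_vocab["[GO]"]) for ch in text]


assert char_tokenize("abz") == [2, 3, 0]  # "z" is unknown, so it maps to [GO]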
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: __UpperCAmelCase = None __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} __UpperCAmelCase = { """vocab_file""": { """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""", """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model""" ), }, """tokenizer_file""": { """google/bigbird-roberta-base""": ( """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json""" ), """google/bigbird-roberta-large""": ( """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json""" ), """google/bigbird-base-trivia-itc""": ( """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json""" ), }, } __UpperCAmelCase = { """google/bigbird-roberta-base""": 4096, """google/bigbird-roberta-large""": 4096, """google/bigbird-base-trivia-itc""": 4096, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = BigBirdTokenizer SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = [] def __init__( self : Any , lowerCamelCase_ : str=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : Dict="<unk>" , lowerCamelCase_ : int="<s>" , lowerCamelCase_ : Optional[Any]="</s>" , lowerCamelCase_ : Dict="<pad>" , lowerCamelCase_ : Tuple="[SEP]" , lowerCamelCase_ : Dict="[MASK]" , lowerCamelCase_ : Union[str, Any]="[CLS]" , **lowerCamelCase_ : Dict , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token SCREAMING_SNAKE_CASE : Any = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it SCREAMING_SNAKE_CASE : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token super().__init__( lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : List[Any] = vocab_file SCREAMING_SNAKE_CASE : Optional[Any] = False if not self.vocab_file else True def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [self.sep_token_id] SCREAMING_SNAKE_CASE : int = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = [self.sep_token_id] SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase_ ( self : str , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Tuple = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ): copyfile(self.vocab_file , lowerCamelCase_ ) return (out_vocab_file,)
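# Plain-list sketch of how the fast tokenizer above frames sequences: a single
# input becomes [CLS] ids [SEP], a pair becomes [CLS] a [SEP] b [SEP], and the
# special-tokens mask marks those positions with 1. The integer ids are
# placeholders, not the real BigBird vocabulary ids.
CLS, SEP = 65, 66


def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]


def special_tokens_mask(ids_a, ids_b=None):
    if ids_b is None:
        return [1] + [0] * len(ids_a) + [1]
    return [1] + [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]


assert build_inputs([7, 8]) == [CLS, 7, 8, SEP]
assert special_tokens_mask([7, 8], [9]) == [1, 0, 0, 1, 0, 1]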
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


__UpperCAmelCase = logging.get_logger(__name__)


class UpperCamelCase__(GLPNImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs):
        '''simple docstring'''
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""swish""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = get_activation("""silu""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""mish""" ) self.assertIsInstance(lowerCamelCase_ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = get_activation("""gelu""" ) self.assertIsInstance(lowerCamelCase_ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
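# A standalone check mirroring the assertions above, using only stock torch
# modules: SiLU is exactly zero at 0, underflows to zero for very negative
# inputs, is nonzero at -1, and is approximately the identity for large
# positive values.
import torch
from torch import nn

act = nn.SiLU()
assert act(torch.tensor(0.0)).item() == 0
assert act(torch.tensor(-100.0)).item() == 0
assert act(torch.tensor(-1.0)).item() != 0
assert round(act(torch.tensor(20.0)).item()) == 20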
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) __UpperCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Model type selected in the list: ''' + ''', '''.join(lowercase_ )} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''The input data dir. Should contain the .json files for the SQuAD task.'''} ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated, sequences shorter will be padded.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=128 , metadata={'''help''': '''When splitting up a long document into chunks, how much stride to take between chunks.'''} , ) SCREAMING_SNAKE_CASE__ = field( default=64 , metadata={ '''help''': ( '''The maximum number of tokens for the question. Questions longer than this will ''' '''be truncated to this length.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=30 , metadata={ '''help''': ( '''The maximum length of an answer that can be generated. 
This is needed because the start ''' '''and end predictions are not conditioned on one another.''' ) } , ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''If true, the SQuAD examples contain some that do not have an answer.'''} ) SCREAMING_SNAKE_CASE__ = field( default=0.0 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) SCREAMING_SNAKE_CASE__ = field( default=20 , metadata={'''help''': '''If null_score - best_non_null is greater than the threshold predict null.'''} ) SCREAMING_SNAKE_CASE__ = field( default=0 , metadata={ '''help''': ( '''language id of input for language-specific xlm models (see''' ''' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)''' ) } , ) SCREAMING_SNAKE_CASE__ = field(default=1 , metadata={'''help''': '''multiple threads for converting example to features'''} ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''train''' SCREAMING_SNAKE_CASE__ = '''dev''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = 42 def __init__( self : Any , lowerCamelCase_ : SquadDataTrainingArguments , lowerCamelCase_ : PreTrainedTokenizer , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Union[str, Split] = Split.train , lowerCamelCase_ : Optional[bool] = False , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[str] = "pt" , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = args SCREAMING_SNAKE_CASE : List[Any] = is_language_sensitive SCREAMING_SNAKE_CASE : str = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(lowerCamelCase_ , lowerCamelCase_ ): try: SCREAMING_SNAKE_CASE : Dict = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) SCREAMING_SNAKE_CASE : str = mode # Load data features from cache or dataset file SCREAMING_SNAKE_CASE : Union[str, Any] = """v2""" if args.version_2_with_negative else """v1""" SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. SCREAMING_SNAKE_CASE : Tuple = cached_features_file + """.lock""" with FileLock(lowerCamelCase_ ): if os.path.exists(lowerCamelCase_ ) and not args.overwrite_cache: SCREAMING_SNAKE_CASE : Any = time.time() SCREAMING_SNAKE_CASE : Any = torch.load(lowerCamelCase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. 
SCREAMING_SNAKE_CASE : Optional[int] = self.old_features["""features"""] SCREAMING_SNAKE_CASE : Any = self.old_features.get("""dataset""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = self.old_features.get("""examples""" , lowerCamelCase_ ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' """ future run""" ) else: if mode == Split.dev: SCREAMING_SNAKE_CASE : Optional[Any] = self.processor.get_dev_examples(args.data_dir ) else: SCREAMING_SNAKE_CASE : List[Any] = self.processor.get_train_examples(args.data_dir ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = squad_convert_examples_to_features( examples=self.examples , tokenizer=lowerCamelCase_ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Optional[Any] = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , lowerCamelCase_ , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self : Tuple ): '''simple docstring''' return len(self.features ) def __getitem__( self : Optional[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.features[i] SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(feature.input_ids , dtype=torch.long ) SCREAMING_SNAKE_CASE : int = torch.tensor(feature.attention_mask , dtype=torch.long ) SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(feature.token_type_ids , dtype=torch.long ) SCREAMING_SNAKE_CASE : Dict = torch.tensor(feature.cls_index , dtype=torch.long ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(feature.p_mask , dtype=torch.float ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(feature.is_impossible , dtype=torch.float ) SCREAMING_SNAKE_CASE : Optional[Any] = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(feature.start_position , dtype=torch.long ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
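# Condensed sketch of the caching pattern used above: build the features once
# under a FileLock so concurrent workers do not race, serialize them with
# torch.save, and let later runs simply torch.load the cache. The path and the
# payload below are placeholders, not the real SQuAD features.
import os

import torch
from filelock import FileLock

cache_file = "cached_features.pt"  # illustrative path
with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        payload = torch.load(cache_file)
    else:
        payload = {"features": [1, 2, 3], "dataset": None, "examples": None}
        torch.save(payload, cache_file)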
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""", """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""", """microsoft/deberta-v2-xlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json""" ), """microsoft/deberta-v2-xxlarge-mnli""": ( """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json""" ), } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''deberta-v2''' def __init__( self : int , lowerCamelCase_ : Optional[Any]=12_81_00 , lowerCamelCase_ : str=15_36 , lowerCamelCase_ : int=24 , lowerCamelCase_ : List[str]=24 , lowerCamelCase_ : List[Any]=61_44 , lowerCamelCase_ : List[Any]="gelu" , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : str=0 , lowerCamelCase_ : Union[str, Any]=0.02 , lowerCamelCase_ : Dict=1e-7 , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[int]=-1 , lowerCamelCase_ : List[str]=0 , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Optional[Any]=0 , lowerCamelCase_ : Dict="gelu" , **lowerCamelCase_ : Optional[int] , ): '''simple docstring''' super().__init__(**lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : List[str] = intermediate_size SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : str = max_position_embeddings SCREAMING_SNAKE_CASE : Tuple = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = initializer_range SCREAMING_SNAKE_CASE : List[Any] = relative_attention SCREAMING_SNAKE_CASE : str = max_relative_positions SCREAMING_SNAKE_CASE : int = pad_token_id SCREAMING_SNAKE_CASE : List[str] = position_biased_input # Backwards compatibility if type(lowerCamelCase_ ) == str: SCREAMING_SNAKE_CASE : Dict = [x.strip() for x in pos_att_type.lower().split("""|""" )] SCREAMING_SNAKE_CASE : Any = pos_att_type SCREAMING_SNAKE_CASE : Any = vocab_size SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps SCREAMING_SNAKE_CASE : str = kwargs.get("""pooler_hidden_size""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = pooler_dropout SCREAMING_SNAKE_CASE : Any = pooler_hidden_act class UpperCamelCase__ ( lowercase_ ): """simple docstring""" @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE : Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE : Union[str, Any] = {0: """batch""", 1: """sequence"""} if self._config.type_vocab_size > 0: return OrderedDict( [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] ) else: return OrderedDict([("""input_ids""", dynamic_axis), 
("""attention_mask""", dynamic_axis)] ) @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return 12 def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : int = -1 , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional["TensorType"] = None , lowerCamelCase_ : int = 3 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : int = 40 , lowerCamelCase_ : "PreTrainedTokenizerBase" = None , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = super().generate_dummy_inputs(preprocessor=lowerCamelCase_ , framework=lowerCamelCase_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """facebook/s2t-small-librispeech-asr""": ( """https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json""" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''speech_to_text''' SCREAMING_SNAKE_CASE__ = ['''past_key_values'''] SCREAMING_SNAKE_CASE__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : int , lowerCamelCase_ : Optional[int]=1_00_00 , lowerCamelCase_ : str=12 , lowerCamelCase_ : List[Any]=20_48 , lowerCamelCase_ : int=4 , lowerCamelCase_ : Optional[Any]=6 , lowerCamelCase_ : int=20_48 , lowerCamelCase_ : List[str]=4 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : int=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : List[str]="relu" , lowerCamelCase_ : Optional[Any]=2_56 , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : Tuple=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Tuple=0.02 , lowerCamelCase_ : Dict=2 , lowerCamelCase_ : str=True , lowerCamelCase_ : List[Any]=1 , lowerCamelCase_ : List[Any]=0 , lowerCamelCase_ : List[Any]=2 , lowerCamelCase_ : List[str]=60_00 , lowerCamelCase_ : Optional[int]=10_24 , lowerCamelCase_ : Any=2 , lowerCamelCase_ : Optional[int]=(5, 5) , lowerCamelCase_ : int=10_24 , lowerCamelCase_ : Optional[Any]=80 , lowerCamelCase_ : Any=1 , **lowerCamelCase_ : int , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = vocab_size SCREAMING_SNAKE_CASE : List[str] = d_model SCREAMING_SNAKE_CASE : Dict = encoder_ffn_dim SCREAMING_SNAKE_CASE : Any = encoder_layers SCREAMING_SNAKE_CASE : List[Any] = encoder_attention_heads SCREAMING_SNAKE_CASE : int = decoder_ffn_dim SCREAMING_SNAKE_CASE : Any = decoder_layers SCREAMING_SNAKE_CASE : Tuple = decoder_attention_heads SCREAMING_SNAKE_CASE : List[Any] = dropout SCREAMING_SNAKE_CASE : Dict = attention_dropout SCREAMING_SNAKE_CASE : Any = activation_dropout SCREAMING_SNAKE_CASE : Tuple = activation_function SCREAMING_SNAKE_CASE : Optional[Any] = init_std SCREAMING_SNAKE_CASE : str = encoder_layerdrop SCREAMING_SNAKE_CASE : Any = decoder_layerdrop SCREAMING_SNAKE_CASE : str = use_cache SCREAMING_SNAKE_CASE : int = encoder_layers SCREAMING_SNAKE_CASE : Optional[Any] = scale_embedding # scale factor will be sqrt(d_model) if True SCREAMING_SNAKE_CASE : Any = max_source_positions SCREAMING_SNAKE_CASE : Optional[int] = max_target_positions SCREAMING_SNAKE_CASE : Tuple = num_conv_layers SCREAMING_SNAKE_CASE : Union[str, Any] = list(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = conv_channels SCREAMING_SNAKE_CASE : Any = input_feat_per_channel SCREAMING_SNAKE_CASE : Dict = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( """Configuration for convolutional module is incorrect. """ """It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """ f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''' ) super().__init__( pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , is_encoder_decoder=lowerCamelCase_ , decoder_start_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
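# Stand-alone version of the sanity check at the end of the config above: the
# tuple of convolutional kernel sizes must have exactly num_conv_layers
# entries, otherwise the subsampling front-end cannot be built. Values are
# illustrative.
def check_conv_config(conv_kernel_sizes, num_conv_layers):
    if len(conv_kernel_sizes) != num_conv_layers:
        raise ValueError(
            f"`len(conv_kernel_sizes) = {len(conv_kernel_sizes)}` must equal "
            f"`num_conv_layers = {num_conv_layers}`."
        )


check_conv_config((5, 5), 2)  # passes silently
# check_conv_config((5, 5), 3)  # would raise ValueError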
'''simple docstring''' from collections import deque from math import floor from random import random from time import time class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = {} def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: SCREAMING_SNAKE_CASE : str = [[w, v]] if not self.graph.get(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = [] def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Any , lowerCamelCase_ : str ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Union[str, Any]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Tuple = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Any = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : str = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : Union[str, Any] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Any=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = deque() SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : int = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : Dict = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Optional[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : List[str] = [] if s == -2: SCREAMING_SNAKE_CASE : Union[str, Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) 
SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : List[str] = [] while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[Any] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : int = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return sorted_nodes def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Union[str, Any] = s SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : Union[str, Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Any = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : int = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : int = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = s SCREAMING_SNAKE_CASE : List[Any] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = -2 SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Tuple = s SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : str = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : str = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Dict = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : List[str] = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = s 
SCREAMING_SNAKE_CASE : Optional[int] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str=-2 , lowerCamelCase_ : int=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin def lowerCamelCase_ ( self : int , lowerCamelCase_ : Tuple=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = time() return end - begin class UpperCamelCase__ : """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = {} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Union[str, Any]=1 ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, v]] # add the other way if self.graph.get(lowerCamelCase_ ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist SCREAMING_SNAKE_CASE : Any = [[w, u]] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' if self.graph.get(lowerCamelCase_ ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCamelCase_ ) # the other way round if self.graph.get(lowerCamelCase_ ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : str=-2 , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if s == d: return [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] if s == -2: SCREAMING_SNAKE_CASE : List[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Union[str, Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(lowerCamelCase_ ) return visited else: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : Any = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : List[str] = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return visited def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[str]=-1 ): '''simple docstring''' if c == -1: SCREAMING_SNAKE_CASE : Any = floor(random() * 1_00_00 ) + 10 for i in range(lowerCamelCase_ ): # every vertex has max 100 edges for _ in range(floor(random() * 1_02 ) + 1 ): SCREAMING_SNAKE_CASE : List[str] = floor(random() * c ) + 1 if n != i: self.add_pair(lowerCamelCase_ , lowerCamelCase_ , 1 ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[Any]=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = deque() SCREAMING_SNAKE_CASE : Tuple = [] if s == -2: SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] d.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) while d: SCREAMING_SNAKE_CASE : List[Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if 
visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : Optional[Any] = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = -2 SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Optional[Any] = False SCREAMING_SNAKE_CASE : Optional[Any] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Optional[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : str = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Optional[int] = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : int = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Union[str, Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = s SCREAMING_SNAKE_CASE : str = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return list(lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = [] SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : Any = list(self.graph )[0] stack.append(lowerCamelCase_ ) visited.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = -2 SCREAMING_SNAKE_CASE : int = [] SCREAMING_SNAKE_CASE : int = s SCREAMING_SNAKE_CASE : Union[str, Any] = False SCREAMING_SNAKE_CASE : Tuple = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: SCREAMING_SNAKE_CASE : Any = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): SCREAMING_SNAKE_CASE : Optional[Any] = len(lowerCamelCase_ ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) SCREAMING_SNAKE_CASE : Tuple = node[1] break # check if all the children are visited if s == ss: stack.pop() SCREAMING_SNAKE_CASE : Any = True if len(lowerCamelCase_ ) != 0: SCREAMING_SNAKE_CASE : str = stack[len(lowerCamelCase_ ) - 1] else: SCREAMING_SNAKE_CASE : Optional[Any] = False indirect_parents.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = s SCREAMING_SNAKE_CASE : Tuple = ss # check if se have reached the starting point if len(lowerCamelCase_ ) == 0: return False def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : List[str]=-2 , lowerCamelCase_ : str=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = time() self.dfs(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = time() return end - 
begin def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Dict=-2 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = time() self.bfs(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = time() return end - begin
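# The traversal methods above are hard to read through the obfuscated names, so
# here is a compact iterative depth-first search over the same adjacency layout
# ({node: [[weight, neighbor], ...]}). It is an illustration of the idea, not a
# drop-in replacement for the class methods.
def dfs_order(graph, start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.append(node)
        for _, neighbor in reversed(graph.get(node, [])):
            if neighbor not in visited:
                stack.append(neighbor)
    return visited


toy = {1: [[1, 2], [1, 3]], 2: [[1, 4]], 3: [], 4: []}
assert dfs_order(toy, 1) == [1, 2, 4, 3]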
'''simple docstring'''
import torch


def main():
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""} __UpperCAmelCase = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, } __UpperCAmelCase = { """moussaKam/mbarthez""": 1024, """moussaKam/barthez""": 1024, """moussaKam/barthez-orangesum-title""": 1024, } __UpperCAmelCase = """▁""" class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] def __init__( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple="<s>" , lowerCamelCase_ : Union[str, Any]="</s>" , lowerCamelCase_ : Tuple="</s>" , lowerCamelCase_ : str="<s>" , lowerCamelCase_ : Optional[int]="<unk>" , lowerCamelCase_ : List[Any]="<pad>" , lowerCamelCase_ : Optional[Any]="<mask>" , lowerCamelCase_ : Optional[Dict[str, Any]] = None , **lowerCamelCase_ : List[str] , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token SCREAMING_SNAKE_CASE : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Dict = vocab_file SCREAMING_SNAKE_CASE : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} SCREAMING_SNAKE_CASE : str = len(self.sp_model ) - 1 SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] SCREAMING_SNAKE_CASE : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase_ ( self : Any , lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None , lowerCamelCase_ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase_ )) + [1] return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1] def lowerCamelCase_ ( self : str , 
lowerCamelCase_ : List[int] , lowerCamelCase_ : Optional[List[int]] = None ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = [self.sep_token_id] SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return len(self.sp_model ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase_ , out_type=lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : str ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE : List[str] = self.sp_model.PieceToId(lowerCamelCase_ ) return spm_id if spm_id else self.unk_token_id def lowerCamelCase_ ( self : int , lowerCamelCase_ : List[str] ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Tuple = """""" SCREAMING_SNAKE_CASE : Dict = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase_ ) + token SCREAMING_SNAKE_CASE : str = True SCREAMING_SNAKE_CASE : Optional[Any] = [] else: current_sub_tokens.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = False out_string += self.sp_model.decode(lowerCamelCase_ ) return out_string.strip() def __getstate__( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Optional[Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE : Dict = os.path.join( lowerCamelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase_ , """wb""" ) as fi: SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase_ ) return (out_vocab_file,)
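# Minimal mirror of the id lookup above: a handful of special tokens get fixed
# fairseq-style ids and any piece the (stand-in) SentencePiece table does not
# know falls back to the <unk> id. Both tables below are invented for the
# example.
fairseq_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fake_spm = {"▁le": 17, "▁chat": 42}  # pretend SentencePiece pieces


def token_to_id(token):
    if token in fairseq_ids:
        return fairseq_ids[token]
    return fake_spm.get(token, fairseq_ids["<unk>"])


assert token_to_id("<pad>") == 1
assert token_to_id("▁chat") == 42
assert token_to_id("▁xyz") == 3  # unknown piece resolves to <unk>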
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""ConvNextFeatureExtractor"""] __UpperCAmelCase = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
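# The guarded-import pattern above, in plain form: probe for an optional
# backend and only register the heavy symbols when it is installed. This
# helper uses importlib directly instead of the library's internal
# availability checks, so treat it as a rough sketch.
import importlib.util


def backend_available(name):
    return importlib.util.find_spec(name) is not None


exported = ["ConvNextConfig"]
if backend_available("torch"):
    exported.append("ConvNextModel")
if backend_available("tensorflow"):
    exported.append("TFConvNextModel")
print(exported)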
79
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = TFXLMRobertaModel.from_pretrained("""jplu/tf-xlm-roberta-base""" ) SCREAMING_SNAKE_CASE : Dict = { """input_ids""": tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.intaa ), # "My dog is cute" """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ), } SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )["""last_hidden_state"""] SCREAMING_SNAKE_CASE : Union[str, Any] = tf.TensorShape((1, 6, 7_68) ) self.assertEqual(output.shape , lowerCamelCase_ ) # compare the actual values for a slice. SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [ [ [0.0_681_762, 0.10_894_451, 0.06_772_504], [-0.06_423_668, 0.02_366_615, 0.04_329_344], [-0.06_057_295, 0.09_974_135, -0.00_070_584], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
79
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ): '''simple docstring''' warnings.warn( """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use FlavaImageProcessor instead.""" , lowerCamelCase_ , ) super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
79
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''Translation''' , init=lowercase_ , repr=lowercase_ ) def __call__( self : int ): '''simple docstring''' return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "dict" SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = field(default='''TranslationVariableLanguages''' , init=lowercase_ , repr=lowercase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = sorted(set(self.languages ) ) if self.languages else None SCREAMING_SNAKE_CASE : str = len(self.languages ) if self.languages else None def __call__( self : Tuple ): '''simple docstring''' return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = set(self.languages ) if self.languages and set(lowerCamelCase_ ) - lang_set: raise ValueError( f'''Some languages in example ({", ".join(sorted(set(lowerCamelCase_ ) - lang_set ) )}) are not in valid set ({", ".join(lowerCamelCase_ )}).''' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. SCREAMING_SNAKE_CASE : List[Any] = [] for lang, text in translation_dict.items(): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[Any] = zip(*sorted(lowerCamelCase_ ) ) return {"language": languages, "translation": translations} def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
79
1
'''simple docstring''' import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase_ ( *lowerCamelCase_ : List[str] , **lowerCamelCase_ : List[str] ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def lowerCamelCase_ ( self : int , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) SCREAMING_SNAKE_CASE : Tuple = [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] return object_detector, examples def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = object_detector(examples[0] , threshold=0.0 ) SCREAMING_SNAKE_CASE : List[str] = len(lowerCamelCase_ ) self.assertGreater(lowerCamelCase_ , 0 ) self.assertEqual( lowerCamelCase_ , [ { """score""": ANY(lowerCamelCase_ ), """label""": ANY(lowerCamelCase_ ), """box""": {"""xmin""": ANY(lowerCamelCase_ ), """ymin""": ANY(lowerCamelCase_ ), """xmax""": ANY(lowerCamelCase_ ), """ymax""": ANY(lowerCamelCase_ )}, } for i in range(lowerCamelCase_ ) ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' pass @require_torch def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = pipeline( """zero-shot-object-detection""" , model="""hf-internal-testing/tiny-random-owlvit-object-detection""" ) SCREAMING_SNAKE_CASE : int = object_detector( """./tests/fixtures/tests_samples/COCO/000000039769.png""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}}, {"""score""": 
0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, ] , ) SCREAMING_SNAKE_CASE : List[str] = object_detector( [ { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """candidate_labels""": ["""cat""", """remote""", """couch"""], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ [ {"""score""": 0.7_235, """label""": """cat""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7_218, """label""": """remote""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.7_184, """label""": """couch""", """box""": {"""xmin""": 2_04, """ymin""": 1_67, """xmax""": 2_32, """ymax""": 1_90}}, {"""score""": 0.6_748, """label""": """remote""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6_656, """label""": """cat""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6_614, """label""": """couch""", """box""": {"""xmin""": 5_71, """ymin""": 83, """xmax""": 5_98, """ymax""": 1_03}}, {"""score""": 0.6_456, """label""": """remote""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, {"""score""": 0.642, """label""": """remote""", """box""": {"""xmin""": 67, """ymin""": 2_74, """xmax""": 93, """ymax""": 2_97}}, {"""score""": 0.6_419, """label""": """cat""", """box""": {"""xmin""": 4_94, """ymin""": 1_05, """xmax""": 5_21, """ymax""": 1_27}}, ] ] , ) @require_torch @slow def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline("""zero-shot-object-detection""" ) SCREAMING_SNAKE_CASE : Dict = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ] , ) SCREAMING_SNAKE_CASE : str = object_detector( [ { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, { """image""": """http://images.cocodataset.org/val2017/000000039769.jpg""", """candidate_labels""": ["""cat""", """remote""", """couch"""], }, ] , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ [ {"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1_474, """label""": """remote""", """box""": 
{"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ], [ {"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, {"""score""": 0.1_474, """label""": """remote""", """box""": {"""xmin""": 3_35, """ymin""": 74, """xmax""": 3_71, """ymax""": 1_87}}, {"""score""": 0.1_208, """label""": """couch""", """box""": {"""xmin""": 4, """ymin""": 0, """xmax""": 6_42, """ymax""": 4_76}}, ], ] , ) @require_tf @unittest.skip("""Zero Shot Object Detection not implemented in TF""" ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' pass @require_torch @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = 0.2 SCREAMING_SNAKE_CASE : List[Any] = pipeline("""zero-shot-object-detection""" ) SCREAMING_SNAKE_CASE : Any = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , threshold=lowerCamelCase_ , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, {"""score""": 0.2_537, """label""": """cat""", """box""": {"""xmin""": 1, """ymin""": 55, """xmax""": 3_15, """ymax""": 4_72}}, ] , ) @require_torch @slow def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = 2 SCREAMING_SNAKE_CASE : Optional[int] = pipeline("""zero-shot-object-detection""" ) SCREAMING_SNAKE_CASE : Any = object_detector( """http://images.cocodataset.org/val2017/000000039769.jpg""" , candidate_labels=["""cat""", """remote""", """couch"""] , top_k=lowerCamelCase_ , ) self.assertEqual( nested_simplify(lowerCamelCase_ , decimals=4 ) , [ {"""score""": 0.2_868, """label""": """cat""", """box""": {"""xmin""": 3_24, """ymin""": 20, """xmax""": 6_40, """ymax""": 3_73}}, {"""score""": 0.277, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 72, """xmax""": 1_77, """ymax""": 1_15}}, ] , )
79
'''simple docstring''' import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Dict , *lowerCamelCase_ : List[str] , **lowerCamelCase_ : Dict ): '''simple docstring''' warnings.warn( """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use FlavaImageProcessor instead.""" , lowerCamelCase_ , ) super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
79
1
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""swish""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = get_activation("""silu""" ) self.assertIsInstance(lowerCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = get_activation("""mish""" ) self.assertIsInstance(lowerCamelCase_ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = get_activation("""gelu""" ) self.assertIsInstance(lowerCamelCase_ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
79
'''simple docstring''' from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __UpperCAmelCase = logging.get_logger(__name__) class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = 42 SCREAMING_SNAKE_CASE__ = None @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : str ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : int , lowerCamelCase_ : Dict ): '''simple docstring''' raise NotImplementedError def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def lowerCamelCase_ ( cls : Any ): '''simple docstring''' return f'''`pip install {cls.pip_package or cls.name}`''' class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''optuna''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_optuna_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Dict ): '''simple docstring''' return run_hp_search_optuna(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : Any ): '''simple docstring''' return default_hp_space_optuna(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''ray''' SCREAMING_SNAKE_CASE__ = '''\'ray[tune]\'''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_ray_available() def lowerCamelCase_ ( self : Any , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_ray(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Optional[int] ): '''simple docstring''' return default_hp_space_ray(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''sigopt''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_sigopt_available() def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : int ): '''simple docstring''' return run_hp_search_sigopt(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return default_hp_space_sigopt(lowerCamelCase_ ) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''wandb''' @staticmethod def lowerCamelCase_ ( ): '''simple docstring''' return is_wandb_available() def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Optional[Any] ): '''simple docstring''' return run_hp_search_wandb(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Tuple ): '''simple docstring''' return default_hp_space_wandb(lowerCamelCase_ ) __UpperCAmelCase = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(lowerCamelCase_ ) > 0: SCREAMING_SNAKE_CASE : List[Any] = available_backends[0].name if len(lowerCamelCase_ ) > 1: logger.info( f'''{len(lowerCamelCase_ )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
79
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = { """configuration_blenderbot""": [ """BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlenderbotConfig""", """BlenderbotOnnxConfig""", ], """tokenization_blenderbot""": ["""BlenderbotTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""BlenderbotTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlenderbotForCausalLM""", """BlenderbotForConditionalGeneration""", """BlenderbotModel""", """BlenderbotPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """TFBlenderbotForConditionalGeneration""", """TFBlenderbotModel""", """TFBlenderbotPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """FlaxBlenderbotForConditionalGeneration""", """FlaxBlenderbotModel""", """FlaxBlenderbotPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
79
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = """""" __UpperCAmelCase = 1 # (0 is vertical, 1 is horizontal) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = get_dataset(lowerCamelCase_ , lowerCamelCase_ ) print("""Processing...""" ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Any = update_image_and_anno(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for index, image in enumerate(lowerCamelCase_ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' SCREAMING_SNAKE_CASE : Optional[int] = random_chars(32 ) SCREAMING_SNAKE_CASE : Optional[Any] = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] SCREAMING_SNAKE_CASE : Dict = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' , lowerCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(lowerCamelCase_ )} with {file_name}''' ) SCREAMING_SNAKE_CASE : Optional[Any] = [] for anno in new_annos[index]: SCREAMING_SNAKE_CASE : Optional[Any] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(lowerCamelCase_ ) with open(f'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = [] SCREAMING_SNAKE_CASE : Any = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ , """*.txt""" ) ): SCREAMING_SNAKE_CASE : str = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(lowerCamelCase_ ) as in_file: SCREAMING_SNAKE_CASE : Any = in_file.readlines() SCREAMING_SNAKE_CASE : List[Any] = os.path.join(lowerCamelCase_ , f'''{label_name}.jpg''' ) SCREAMING_SNAKE_CASE : Tuple = [] for obj_list in obj_lists: SCREAMING_SNAKE_CASE : Union[str, Any] = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowerCamelCase_ ) labels.append(lowerCamelCase_ ) return img_paths, labels def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = [] SCREAMING_SNAKE_CASE : List[str] = [] SCREAMING_SNAKE_CASE : Optional[Any] = [] for idx in range(len(lowerCamelCase_ ) ): SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Dict = img_list[idx] path_list.append(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = anno_list[idx] SCREAMING_SNAKE_CASE : Optional[Any] = cva.imread(lowerCamelCase_ ) if flip_type == 1: SCREAMING_SNAKE_CASE : List[str] = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: SCREAMING_SNAKE_CASE : Any = cva.flip(lowerCamelCase_ , lowerCamelCase_ ) for bbox in img_annos: SCREAMING_SNAKE_CASE : Optional[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowerCamelCase_ ) new_imgs_list.append(lowerCamelCase_ ) return new_imgs_list, new_annos_lists, path_list def __A ( lowerCamelCase_ = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" SCREAMING_SNAKE_CASE : Dict = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_ ) for _ in 
range(lowerCamelCase_ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
79
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''luke''' def __init__( self : str , lowerCamelCase_ : Tuple=5_02_67 , lowerCamelCase_ : Union[str, Any]=50_00_00 , lowerCamelCase_ : int=7_68 , lowerCamelCase_ : Union[str, Any]=2_56 , lowerCamelCase_ : Union[str, Any]=12 , lowerCamelCase_ : Tuple=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : Any="gelu" , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Any=5_12 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : Optional[Any]=1e-12 , lowerCamelCase_ : Any=True , lowerCamelCase_ : Dict=None , lowerCamelCase_ : List[Any]=1 , lowerCamelCase_ : Any=0 , lowerCamelCase_ : str=2 , **lowerCamelCase_ : int , ): '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = vocab_size SCREAMING_SNAKE_CASE : int = entity_vocab_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = entity_emb_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : str = num_attention_heads SCREAMING_SNAKE_CASE : Optional[int] = hidden_act SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size SCREAMING_SNAKE_CASE : str = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size SCREAMING_SNAKE_CASE : Any = initializer_range SCREAMING_SNAKE_CASE : Any = layer_norm_eps SCREAMING_SNAKE_CASE : str = use_entity_aware_attention SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
79
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = { """google/vivit-b-16x2-kinetics400""": ( """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json""" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = '''vivit''' def __init__( self : Tuple , lowerCamelCase_ : str=2_24 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : Tuple=[2, 16, 16] , lowerCamelCase_ : List[str]=3 , lowerCamelCase_ : Dict=7_68 , lowerCamelCase_ : Dict=12 , lowerCamelCase_ : Any=12 , lowerCamelCase_ : List[Any]=30_72 , lowerCamelCase_ : List[str]="gelu_fast" , lowerCamelCase_ : str=0.0 , lowerCamelCase_ : Any=0.0 , lowerCamelCase_ : Optional[int]=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = hidden_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers SCREAMING_SNAKE_CASE : List[str] = num_attention_heads SCREAMING_SNAKE_CASE : str = intermediate_size SCREAMING_SNAKE_CASE : List[Any] = hidden_act SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : Tuple = initializer_range SCREAMING_SNAKE_CASE : str = layer_norm_eps SCREAMING_SNAKE_CASE : str = image_size SCREAMING_SNAKE_CASE : Dict = num_frames SCREAMING_SNAKE_CASE : Optional[Any] = tubelet_size SCREAMING_SNAKE_CASE : Dict = num_channels SCREAMING_SNAKE_CASE : int = qkv_bias super().__init__(**lowerCamelCase_ )
79
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __UpperCAmelCase = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
79
'''simple docstring''' import math class UpperCamelCase__ : """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : Tuple=0 ): # a graph with Node 0,1,...,N-1 '''simple docstring''' SCREAMING_SNAKE_CASE : Any = n SCREAMING_SNAKE_CASE : Optional[int] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # adjacency matrix for weight SCREAMING_SNAKE_CASE : Union[str, Any] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # dp[i][j] stores minimum distance from i to j def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : List[str] , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = w def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): SCREAMING_SNAKE_CASE : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' return self.dp[u][v] if __name__ == "__main__": __UpperCAmelCase = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
79
1
'''simple docstring''' from __future__ import annotations from typing import Generic, TypeVar __UpperCAmelCase = TypeVar("""T""") class UpperCamelCase__ ( Generic[T] ): """simple docstring""" def __init__( self : Any , lowerCamelCase_ : T ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = data SCREAMING_SNAKE_CASE : List[str] = self SCREAMING_SNAKE_CASE : Any = 0 class UpperCamelCase__ ( Generic[T] ): """simple docstring""" def __init__( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : dict[T, DisjointSetTreeNode[T]] = {} def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : T ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = DisjointSetTreeNode(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : T ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.map[data] if elem_ref != elem_ref.parent: SCREAMING_SNAKE_CASE : Dict = self.find_set(elem_ref.parent.data ) return elem_ref.parent def lowerCamelCase_ ( self : str , lowerCamelCase_ : DisjointSetTreeNode[T] , lowerCamelCase_ : DisjointSetTreeNode[T] ): '''simple docstring''' if nodea.rank > nodea.rank: SCREAMING_SNAKE_CASE : Tuple = nodea else: SCREAMING_SNAKE_CASE : str = nodea if nodea.rank == nodea.rank: nodea.rank += 1 def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : T , lowerCamelCase_ : T ): '''simple docstring''' self.link(self.find_set(lowerCamelCase_ ) , self.find_set(lowerCamelCase_ ) ) class UpperCamelCase__ ( Generic[T] ): """simple docstring""" def __init__( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : dict[T, dict[T, int]] = {} def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : T ): '''simple docstring''' if node not in self.connections: SCREAMING_SNAKE_CASE : List[str] = {} def lowerCamelCase_ ( self : Optional[Any] , lowerCamelCase_ : T , lowerCamelCase_ : T , lowerCamelCase_ : int ): '''simple docstring''' self.add_node(lowerCamelCase_ ) self.add_node(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = weight SCREAMING_SNAKE_CASE : Tuple = weight def lowerCamelCase_ ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = [] SCREAMING_SNAKE_CASE : Optional[Any] = set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda lowerCamelCase_ : x[2] ) # creating the disjoint set SCREAMING_SNAKE_CASE : Dict = DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(lowerCamelCase_ ) # MST generation SCREAMING_SNAKE_CASE : str = 0 SCREAMING_SNAKE_CASE : Tuple = 0 SCREAMING_SNAKE_CASE : List[str] = GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = edges[index] index += 1 SCREAMING_SNAKE_CASE : List[Any] = disjoint_set.find_set(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = disjoint_set.find_set(lowerCamelCase_ ) if parent_u != parent_v: num_edges += 1 graph.add_edge(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) disjoint_set.union(lowerCamelCase_ , lowerCamelCase_ ) return graph
79
'''simple docstring''' import math def __A ( lowerCamelCase_ ): """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCamelCase_ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def __A ( lowerCamelCase_ = 1_00_01 ): """simple docstring""" try: SCREAMING_SNAKE_CASE : Tuple = int(lowerCamelCase_ ) except (TypeError, ValueError): raise TypeError("""Parameter nth must be int or castable to int.""" ) from None if nth <= 0: raise ValueError("""Parameter nth must be greater than or equal to one.""" ) SCREAMING_SNAKE_CASE : list[int] = [] SCREAMING_SNAKE_CASE : Dict = 2 while len(lowerCamelCase_ ) < nth: if is_prime(lowerCamelCase_ ): primes.append(lowerCamelCase_ ) num += 1 else: num += 1 return primes[len(lowerCamelCase_ ) - 1] if __name__ == "__main__": print(f'''{solution() = }''')
79
1
'''simple docstring''' import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments __UpperCAmelCase = logging.getLogger(__name__) @dataclass class UpperCamelCase__ ( lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE__ = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Whether to SortishSamler or not.'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''whether to use adafactor'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) SCREAMING_SNAKE_CASE__ = field(default=lowercase_ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) SCREAMING_SNAKE_CASE__ = field( default=lowercase_ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) SCREAMING_SNAKE_CASE__ = field( default='''linear''' , metadata={'''help''': f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
79
'''simple docstring''' from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent __UpperCAmelCase = {"""UserAgent""": UserAgent().random} def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = script.contents[0] SCREAMING_SNAKE_CASE : int = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class UpperCamelCase__ : """simple docstring""" def __init__( self : Dict , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = f'''https://www.instagram.com/{username}/''' SCREAMING_SNAKE_CASE : Any = self.get_json() def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = requests.get(self.url , headers=lowerCamelCase_ ).text SCREAMING_SNAKE_CASE : List[Any] = BeautifulSoup(lowerCamelCase_ , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : Dict ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase_ ( self : int ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase_ ( self : Dict ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' return self.user_data["is_private"] def __A ( lowerCamelCase_ = "github" ): """simple docstring""" import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , lowerCamelCase_ ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_50 assert instagram_user.number_of_followers > 12_00_00 assert instagram_user.number_of_followings > 15 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() __UpperCAmelCase = InstagramUser("""github""") print(instagram_user) print(f'''{instagram_user.number_of_posts = }''') print(f'''{instagram_user.number_of_followers = }''') print(f'''{instagram_user.number_of_followings = }''') print(f'''{instagram_user.email = }''') print(f'''{instagram_user.website = }''') print(f'''{instagram_user.profile_picture_url = }''') print(f'''{instagram_user.is_verified = }''') print(f'''{instagram_user.is_private = }''')
79
1
'''simple docstring''' def __A ( lowerCamelCase_ ): """simple docstring""" if not nums: # Makes sure that the list is not empty raise ValueError("""List is empty""" ) SCREAMING_SNAKE_CASE : int = sum(lowerCamelCase_ ) / len(lowerCamelCase_ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowerCamelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
79
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) __UpperCAmelCase = logging.getLogger(__name__) __UpperCAmelCase = """Hello world! cécé herlolip""" __UpperCAmelCase = namedtuple( """BertAbsConfig""", [ """temp_dir""", """large""", """use_bert_emb""", """finetune_bert""", """encoder""", """share_emb""", """max_pos""", """enc_layers""", """enc_hidden_size""", """enc_heads""", """enc_ff_size""", """enc_dropout""", """dec_layers""", """dec_hidden_size""", """dec_heads""", """dec_ff_size""", """dec_dropout""", ], ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = BertAbsConfig( temp_dir=""".""" , finetune_bert=lowerCamelCase_ , large=lowerCamelCase_ , share_emb=lowerCamelCase_ , use_bert_emb=lowerCamelCase_ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE : int = torch.load(lowerCamelCase_ , lambda lowerCamelCase_ , lowerCamelCase_ : storage ) SCREAMING_SNAKE_CASE : List[str] = AbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) , lowerCamelCase_ ) original.eval() SCREAMING_SNAKE_CASE : Optional[int] = BertAbsSummarizer(lowerCamelCase_ , torch.device("""cpu""" ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info("""convert the model""" ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info("""Make sure that the models' outputs are identical""" ) SCREAMING_SNAKE_CASE : int = BertTokenizer.from_pretrained("""bert-base-uncased""" ) # prepare the model inputs SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.encode("""This is sample éàalj'-.""" ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode("""This is sample 3 éàalj'-.""" ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowerCamelCase_ )) ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(lowerCamelCase_ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE : Optional[int] = encoder_input_ids SCREAMING_SNAKE_CASE : Optional[Any] = decoder_input_ids SCREAMING_SNAKE_CASE : List[str] = None SCREAMING_SNAKE_CASE : Tuple = None SCREAMING_SNAKE_CASE : int = None SCREAMING_SNAKE_CASE : List[Any] = None SCREAMING_SNAKE_CASE : Optional[int] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. 
# We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE : str = original(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : Optional[Any] = original.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = new_model( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )[0] SCREAMING_SNAKE_CASE : str = new_model.generator(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print("""Maximum absolute difference beween weights: {:.2f}""".format(lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Any = torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) if are_identical: logging.info("""all weights are equal up to 1e-3""" ) else: raise ValueError("""the weights are different. The new model is likely different from the original one.""" ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info("""saving the model's state dictionary""" ) torch.save( new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--bertabs_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""", ) __UpperCAmelCase = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
79
1
'''simple docstring''' import copy import re class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = '''hp''' SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = None @classmethod def lowerCamelCase_ ( cls : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = prefix SCREAMING_SNAKE_CASE : Tuple = defaults cls.build_naming_info() @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : str , lowerCamelCase_ : List[str] ): '''simple docstring''' if len(lowerCamelCase_ ) == 0: return "" SCREAMING_SNAKE_CASE : Optional[Any] = None if any(char.isdigit() for char in word ): raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 , len(lowerCamelCase_ ) + 1 ): SCREAMING_SNAKE_CASE : List[str] = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: SCREAMING_SNAKE_CASE : Any = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCamelCase_ : str ): SCREAMING_SNAKE_CASE : str = """""" while integer != 0: SCREAMING_SNAKE_CASE : Optional[int] = chr(ord("""A""" ) + integer % 10 ) + s integer //= 10 return s SCREAMING_SNAKE_CASE : Union[str, Any] = 0 while True: SCREAMING_SNAKE_CASE : List[Any] = word + """#""" + int_to_alphabetic(lowerCamelCase_ ) if sword in info["reverse_short_word"]: continue else: SCREAMING_SNAKE_CASE : List[str] = sword break SCREAMING_SNAKE_CASE : Tuple = short_word SCREAMING_SNAKE_CASE : int = word return short_word @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : int , lowerCamelCase_ : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = param_name.split("""_""" ) SCREAMING_SNAKE_CASE : Dict = [TrialShortNamer.shortname_for_word(lowerCamelCase_ , lowerCamelCase_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name SCREAMING_SNAKE_CASE : Tuple = ["""""", """_"""] for separator in separators: SCREAMING_SNAKE_CASE : List[Any] = separator.join(lowerCamelCase_ ) if shortname not in info["reverse_short_param"]: SCREAMING_SNAKE_CASE : str = shortname SCREAMING_SNAKE_CASE : List[str] = param_name return shortname return param_name @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = TrialShortNamer.shortname_for_key(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = short_name SCREAMING_SNAKE_CASE : int = param_name @classmethod def lowerCamelCase_ ( cls : Optional[int] ): '''simple docstring''' if cls.NAMING_INFO is not None: return SCREAMING_SNAKE_CASE : Optional[Any] = { """short_word""": {}, """reverse_short_word""": {}, """short_param""": {}, """reverse_short_param""": {}, } SCREAMING_SNAKE_CASE : Dict = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = info @classmethod def lowerCamelCase_ ( cls : Dict , lowerCamelCase_ : int ): '''simple docstring''' cls.build_naming_info() assert cls.PREFIX is not None SCREAMING_SNAKE_CASE : List[Any] = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue 
SCREAMING_SNAKE_CASE : Dict = cls.NAMING_INFO["""short_param"""][k] if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : List[str] = 1 if v else 0 SCREAMING_SNAKE_CASE : Dict = """""" if isinstance(lowerCamelCase_ , (int, float) ) else """-""" SCREAMING_SNAKE_CASE : List[Any] = f'''{key}{sep}{v}''' name.append(lowerCamelCase_ ) return "_".join(lowerCamelCase_ ) @classmethod def lowerCamelCase_ ( cls : Optional[int] , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = repr[len(cls.PREFIX ) + 1 :] if repr == "": SCREAMING_SNAKE_CASE : Optional[Any] = [] else: SCREAMING_SNAKE_CASE : Union[str, Any] = repr.split("""_""" ) SCREAMING_SNAKE_CASE : int = {} for value in values: if "-" in value: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = value.split("""-""" ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub("""[0-9.]""" , """""" , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = float(re.sub("""[^0-9.]""" , """""" , lowerCamelCase_ ) ) SCREAMING_SNAKE_CASE : Tuple = cls.NAMING_INFO["""reverse_short_param"""][p_k] SCREAMING_SNAKE_CASE : List[str] = p_v for k in cls.DEFAULTS: if k not in parameters: SCREAMING_SNAKE_CASE : Tuple = cls.DEFAULTS[k] return parameters
79
'''simple docstring''' import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True , lowerCamelCase_="pt" ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(""" """ ) else {} SCREAMING_SNAKE_CASE : Optional[Any] = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" SCREAMING_SNAKE_CASE : int = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : str , lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : List[str]="train" , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Union[str, Any]="" , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE : str = Path(lowerCamelCase_ ).joinpath(type_path + """.source""" ) SCREAMING_SNAKE_CASE : Optional[Any] = Path(lowerCamelCase_ ).joinpath(type_path + """.target""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_char_lens(self.src_file ) SCREAMING_SNAKE_CASE : int = max_source_length SCREAMING_SNAKE_CASE : str = max_target_length assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}''' SCREAMING_SNAKE_CASE : List[str] = tokenizer SCREAMING_SNAKE_CASE : Dict = prefix if n_obs is not None: SCREAMING_SNAKE_CASE : List[Any] = self.src_lens[:n_obs] SCREAMING_SNAKE_CASE : int = src_lang SCREAMING_SNAKE_CASE : Optional[int] = tgt_lang def __len__( self : List[Any] ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : Union[str, Any] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = index + 1 # linecache starts at 1 SCREAMING_SNAKE_CASE : Dict = self.prefix + linecache.getline(str(self.src_file ) , lowerCamelCase_ ).rstrip("""\n""" ) SCREAMING_SNAKE_CASE : Dict = linecache.getline(str(self.tgt_file ) , lowerCamelCase_ ).rstrip("""\n""" ) assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer ) SCREAMING_SNAKE_CASE : Any = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer SCREAMING_SNAKE_CASE : Optional[int] = encode_line(lowerCamelCase_ , lowerCamelCase_ 
, self.max_source_length , """right""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , """right""" ) SCREAMING_SNAKE_CASE : Tuple = source_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : Tuple = target_inputs["""input_ids"""].squeeze() SCREAMING_SNAKE_CASE : List[str] = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def lowerCamelCase_ ( lowerCamelCase_ : Dict ): '''simple docstring''' return [len(lowerCamelCase_ ) for x in Path(lowerCamelCase_ ).open().readlines()] def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = torch.stack([x["""input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""attention_mask"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = torch.stack([x["""decoder_input_ids"""] for x in batch] ) SCREAMING_SNAKE_CASE : int = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Dict = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_ ) else self.tokenizer.pad_token_id ) SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch __UpperCAmelCase = getLogger(__name__) def __A ( lowerCamelCase_ ): """simple docstring""" return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : str = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , """git_log.json""" ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=4 , **lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """w""" ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = git.Repo(search_parent_directories=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" with open(lowerCamelCase_ , """wb""" ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def __A ( lowerCamelCase_ ): """simple docstring""" def remove_articles(lowerCamelCase_ ): return re.sub(R"""\b(a|an|the)\b""" , """ """ , lowerCamelCase_ ) def white_space_fix(lowerCamelCase_ ): return " ".join(text.split() ) def remove_punc(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(lowerCamelCase_ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def __A ( lowerCamelCase_ , 
lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Optional[int] = normalize_answer(lowerCamelCase_ ).split() SCREAMING_SNAKE_CASE : Tuple = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = sum(common.values() ) if num_same == 0: return 0 SCREAMING_SNAKE_CASE : Optional[int] = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 1.0 * num_same / len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = (2 * precision * recall) / (precision + recall) return fa def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def __A ( lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def __A ( lowerCamelCase_ ): """simple docstring""" return model_prefix.startswith("""rag""" ) def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead SCREAMING_SNAKE_CASE : Dict = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue SCREAMING_SNAKE_CASE : Dict = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
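# Illustrative sketch (added example, not taken from the file above): a
# self-contained version of the token-level F1 that the helpers above compute
# for SQuAD-style evaluation. Names here (token_f1, prediction, reference) are
# assumptions; the file above additionally lowercases, strips punctuation and
# drops articles before splitting.
from collections import Counter


def token_f1(prediction, reference):
    pred_tokens = prediction.lower().split()
    ref_tokens = reference.lower().split()
    common = Counter(pred_tokens) & Counter(ref_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0.0
    precision = num_same / len(pred_tokens)
    recall = num_same / len(ref_tokens)
    return 2 * precision * recall / (precision + recall)


assert abs(token_f1("the cat sat", "a cat sat") - 2 / 3) < 1e-9  # 2 shared tokens out of 3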
'''simple docstring''' import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = CLIPTokenizer SCREAMING_SNAKE_CASE__ = CLIPTokenizerFast SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = False def lowerCamelCase_ ( self : int ): '''simple docstring''' super().setUp() # fmt: off SCREAMING_SNAKE_CASE : Any = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on SCREAMING_SNAKE_CASE : Tuple = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) ) SCREAMING_SNAKE_CASE : int = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""] SCREAMING_SNAKE_CASE : str = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(lowerCamelCase_ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(lowerCamelCase_ ) ) def lowerCamelCase_ ( self : Union[str, Any] , **lowerCamelCase_ : Any ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , **lowerCamelCase_ : Any ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ ) def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = """lower newer""" SCREAMING_SNAKE_CASE : Optional[Any] = """lower newer""" return input_text, output_text def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE : List[Any] = """lower newer""" SCREAMING_SNAKE_CASE : Dict = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""] SCREAMING_SNAKE_CASE : Tuple = tokenizer.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE : Optional[int] = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ ) @require_ftfy def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : Dict = self.tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = """A\n'll 11p223RF☆ho!!to?'d'd''d 
of a cat to-$''d.""" SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways SCREAMING_SNAKE_CASE : int = """xa\u0303y""" + """ """ + """x\xe3y""" SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_s.tokenize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) # Test that the tokenization is identical on unicode of space type SCREAMING_SNAKE_CASE : str = [ """\u0009""", # (horizontal tab, '\t') """\u000B""", # (vertical tab) """\u000C""", # (form feed) """\u0020""", # (space, ' ') """\u200E""", # (left-to-right mark):w """\u200F""", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: SCREAMING_SNAKE_CASE : int = tokenizer_s.tokenize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) # Test that the tokenization is identical on unicode of line break type SCREAMING_SNAKE_CASE : Dict = [ """\u000A""", # (line feed, '\n') """\r\n""", # (carriage return and line feed, '\r\n') """\u000D""", # (carriage return, '\r') """\r""", # (carriage return, '\r') """\u000D""", # (carriage return, '\r') """\u2028""", # (line separator) """\u2029""", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). 
for unicode_seq in line_break_unicodes: SCREAMING_SNAKE_CASE : str = tokenizer_s.tokenize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = tokenizer_r.tokenize(lowerCamelCase_ ) self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): SCREAMING_SNAKE_CASE : List[Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` SCREAMING_SNAKE_CASE : Dict = f'''{text_of_1_token} {text_of_1_token}''' SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Any = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCamelCase_ ) + 1, len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) SCREAMING_SNAKE_CASE : Optional[int] = f''' {text}''' SCREAMING_SNAKE_CASE : Tuple = self.rust_tokenizer_class.from_pretrained( lowerCamelCase_ , use_fast=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : Tuple = tokenizer_r(lowerCamelCase_ , return_offsets_mapping=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCamelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCamelCase_ ) + 1, 1 + len(lowerCamelCase_ ) + 1 + len(lowerCamelCase_ )) , ) def lowerCamelCase_ ( self : int ): '''simple docstring''' with self.assertRaises(lowerCamelCase_ ) as context: self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" ) self.assertTrue( context.exception.args[0].startswith( """The `backend_tokenizer` provided does not match the expected format.""" ) ) @require_ftfy def lowerCamelCase_ ( self : int ): '''simple docstring''' super().test_tokenization_python_rust_equals() def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' pass
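# Illustrative sketch (assumed usage, not from the test file above): the
# slow/fast equivalence the tests exercise, on plain ASCII text. The checkpoint
# name "openai/clip-vit-base-patch32" is an assumption; note the tests document
# known divergences for some unicode inputs.
from transformers import CLIPTokenizer, CLIPTokenizerFast

slow_tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
fast_tok = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")

text = "A photo of a cat"
assert slow_tok.tokenize(text) == fast_tok.tokenize(text)
assert slow_tok(text)["input_ids"] == fast_tok(text)["input_ids"]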
'''simple docstring'''


def factorial(digit):
    """simple docstring"""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number):
    """simple docstring"""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("""Program to check whether a number is a Krishnamurthy Number or not.""")
    number = int(input("""Enter number: """).strip())
    print(f'''{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number.''')
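# Worked example (added, not part of the original file), using the functions
# defined above: 145 is a Krishnamurthy number because
# 1! + 4! + 5! = 1 + 24 + 120 = 145, while 123 is not (1! + 2! + 3! = 9).
assert krishnamurthy(145)
assert not krishnamurthy(123)
assert krishnamurthy(40585)  # 4! + 0! + 5! + 8! + 5! = 40585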
'''simple docstring''' import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen from ..table import array_cast from ..utils.file_utils import is_local_path from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType __UpperCAmelCase = None __UpperCAmelCase = """<""" if sys.byteorder == """little""" else """>""" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image __UpperCAmelCase = [ np.dtype("""|b1"""), np.dtype("""|u1"""), np.dtype("""<u2"""), np.dtype(""">u2"""), np.dtype("""<i2"""), np.dtype(""">i2"""), np.dtype("""<u4"""), np.dtype(""">u4"""), np.dtype("""<i4"""), np.dtype(""">i4"""), np.dtype("""<f4"""), np.dtype(""">f4"""), np.dtype("""<f8"""), np.dtype(""">f8"""), ] @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = None # Automatically constructed SCREAMING_SNAKE_CASE__ = "PIL.Image.Image" SCREAMING_SNAKE_CASE__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) SCREAMING_SNAKE_CASE__ = field(default='''Image''' , init=lowercase_ , repr=lowercase_ ) def __call__( self : List[str] ): '''simple docstring''' return self.pa_type def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = np.array(lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): return {"path": value, "bytes": None} elif isinstance(lowerCamelCase_ , lowerCamelCase_ ): return {"path": None, "bytes": value} elif isinstance(lowerCamelCase_ , np.ndarray ): # convert the image array to PNG/TIFF bytes return encode_np_array(lowerCamelCase_ ) elif isinstance(lowerCamelCase_ , PIL.Image.Image ): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(lowerCamelCase_ ) elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : dict , lowerCamelCase_ : Any=None ): '''simple docstring''' if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Image(decode=True) instead.""" ) if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support decoding images, please install 'Pillow'.""" ) if token_per_repo_id is None: SCREAMING_SNAKE_CASE : int = {} SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Union[str, Any] = value["""path"""], value["""bytes"""] if bytes_ is None: if path is None: raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' ) else: if is_local_path(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : int = PIL.Image.open(lowerCamelCase_ ) else: SCREAMING_SNAKE_CASE : Any = path.split("""::""" )[-1] try: SCREAMING_SNAKE_CASE : str = string_to_dict(lowerCamelCase_ , config.HUB_DATASETS_URL )["""repo_id"""] SCREAMING_SNAKE_CASE : Union[str, Any] = token_per_repo_id.get(lowerCamelCase_ ) except ValueError: SCREAMING_SNAKE_CASE : str = None with xopen(lowerCamelCase_ , """rb""" , use_auth_token=lowerCamelCase_ ) as f: SCREAMING_SNAKE_CASE : List[Any] = BytesIO(f.read() ) SCREAMING_SNAKE_CASE : str = PIL.Image.open(bytes_ ) else: SCREAMING_SNAKE_CASE : str = PIL.Image.open(BytesIO(bytes_ ) ) image.load() # to avoid "Too many open files" errors return image def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' from .features import Value return ( self if self.decode else { "bytes": Value("""binary""" ), "path": Value("""string""" ), } ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ): '''simple docstring''' if pa.types.is_string(storage.type ): SCREAMING_SNAKE_CASE : str = pa.array([None] * len(lowerCamelCase_ ) , type=pa.binary() ) SCREAMING_SNAKE_CASE : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): SCREAMING_SNAKE_CASE : str = pa.array([None] * len(lowerCamelCase_ ) , type=pa.string() ) SCREAMING_SNAKE_CASE : Optional[Any] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: SCREAMING_SNAKE_CASE : int = storage.field("""bytes""" ) else: SCREAMING_SNAKE_CASE : Any = pa.array([None] * len(lowerCamelCase_ ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: SCREAMING_SNAKE_CASE : int = storage.field("""path""" ) else: SCREAMING_SNAKE_CASE : List[Any] = pa.array([None] * len(lowerCamelCase_ ) , type=pa.string() ) SCREAMING_SNAKE_CASE : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_list(storage.type ): SCREAMING_SNAKE_CASE : List[str] = pa.array( [encode_np_array(np.array(lowerCamelCase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , ) SCREAMING_SNAKE_CASE : Optional[Any] = pa.array([None] * len(lowerCamelCase_ ) , type=pa.string() ) SCREAMING_SNAKE_CASE : int = pa.StructArray.from_arrays( [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowerCamelCase_ , self.pa_type ) def lowerCamelCase_ ( self : str , lowerCamelCase_ : pa.StructArray ): '''simple docstring''' @no_op_if_value_is_null def path_to_bytes(lowerCamelCase_ : Tuple ): with xopen(lowerCamelCase_ , """rb""" ) as f: SCREAMING_SNAKE_CASE : List[Any] = f.read() return bytes_ SCREAMING_SNAKE_CASE : int = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else 
x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) SCREAMING_SNAKE_CASE : Tuple = pa.array( [os.path.basename(lowerCamelCase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) SCREAMING_SNAKE_CASE : Any = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowerCamelCase_ , self.pa_type ) def __A ( ): """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() SCREAMING_SNAKE_CASE : List[Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) ) return _IMAGE_COMPRESSION_FORMATS def __A ( lowerCamelCase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : List[Any] = BytesIO() if image.format in list_image_compression_formats(): SCREAMING_SNAKE_CASE : List[Any] = image.format else: SCREAMING_SNAKE_CASE : Dict = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF""" image.save(lowerCamelCase_ , format=lowerCamelCase_ ) return buffer.getvalue() def __A ( lowerCamelCase_ ): """simple docstring""" if hasattr(lowerCamelCase_ , """filename""" ) and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(lowerCamelCase_ )} def __A ( lowerCamelCase_ ): """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) SCREAMING_SNAKE_CASE : Optional[int] = array.dtype SCREAMING_SNAKE_CASE : List[Any] = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER SCREAMING_SNAKE_CASE : Union[str, Any] = dtype.kind SCREAMING_SNAKE_CASE : Union[str, Any] = dtype.itemsize SCREAMING_SNAKE_CASE : str = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: SCREAMING_SNAKE_CASE : Optional[int] = np.dtype("""|u1""" ) if dtype_kind not in ["u", "i"]: raise TypeError( f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' ) if dtype is not dest_dtype: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: SCREAMING_SNAKE_CASE : Optional[Any] = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: SCREAMING_SNAKE_CASE : Optional[Any] = dtype_byteorder + dtype_kind + str(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = np.dtype(lowerCamelCase_ ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f'''Cannot convert dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' ) SCREAMING_SNAKE_CASE : Optional[int] = PIL.Image.fromarray(array.astype(lowerCamelCase_ ) ) return {"path": None, "bytes": image_to_bytes(lowerCamelCase_ )} def __A ( lowerCamelCase_ ): """simple docstring""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("""To support encoding images, please install 'Pillow'.""" ) if objs: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Optional[int] = first_non_null_value(lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(lowerCamelCase_ , np.ndarray ): SCREAMING_SNAKE_CASE : List[Any] = no_op_if_value_is_null(lowerCamelCase_ ) return [obj_to_image_dict_func(lowerCamelCase_ ) for obj in objs] elif isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : Union[str, Any] = no_op_if_value_is_null(lowerCamelCase_ ) return [obj_to_image_dict_func(lowerCamelCase_ ) for obj in objs] else: return objs else: return objs
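# Illustrative sketch (assumed usage, not from the file above) of how the Image
# feature is typically used: string paths are encoded to {"bytes", "path"}
# structs on write and decoded back to PIL images on access. The file name is a
# placeholder written to the working directory just for the example.
import PIL.Image
from datasets import Dataset, Image as ImageFeature

PIL.Image.new("RGB", (4, 4)).save("example.png")  # tiny placeholder image
ds = Dataset.from_dict({"image": ["example.png"]}).cast_column("image", ImageFeature())
pil_image = ds[0]["image"]  # decoded lazily via the decode_example path above
print(pil_image.size)  # (4, 4)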
'''simple docstring''' # Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING import numpy as np import pyarrow as pa from .. import config from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import torch class UpperCamelCase__ ( TensorFormatter[Mapping, '''torch.Tensor''', Mapping] ): """simple docstring""" def __init__( self : Tuple , lowerCamelCase_ : str=None , **lowerCamelCase_ : Dict ): '''simple docstring''' super().__init__(features=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = torch_tensor_kwargs import torch # noqa import torch at initialization def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and column: if all( isinstance(lowerCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return torch.stack(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : int ): '''simple docstring''' import torch if isinstance(lowerCamelCase_ , (str, bytes, type(lowerCamelCase_ )) ): return value elif isinstance(lowerCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() SCREAMING_SNAKE_CASE : str = {} if isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): SCREAMING_SNAKE_CASE : Any = {"""dtype""": torch.intaa} elif isinstance(lowerCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): SCREAMING_SNAKE_CASE : int = {"""dtype""": torch.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(lowerCamelCase_ , PIL.Image.Image ): SCREAMING_SNAKE_CASE : List[Any] = np.asarray(lowerCamelCase_ ) return torch.tensor(lowerCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : Optional[Any] ): '''simple docstring''' import torch # support for torch, tf, jax etc. 
if hasattr(lowerCamelCase_ , """__array__""" ) and not isinstance(lowerCamelCase_ , torch.Tensor ): SCREAMING_SNAKE_CASE : Dict = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(lowerCamelCase_ , np.ndarray ): if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) elif isinstance(lowerCamelCase_ , (list, tuple) ): return self._consolidate([self.recursive_tensorize(lowerCamelCase_ ) for substruct in data_struct] ) return self._tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : int , lowerCamelCase_ : dict ): '''simple docstring''' return map_nested(self._recursive_tensorize , lowerCamelCase_ , map_list=lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self.numpy_arrow_extractor().extract_row(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_row(lowerCamelCase_ ) return self.recursive_tensorize(lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.numpy_arrow_extractor().extract_column(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = self.python_features_decoder.decode_column(lowerCamelCase_ , pa_table.column_names[0] ) SCREAMING_SNAKE_CASE : List[str] = self.recursive_tensorize(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self._consolidate(lowerCamelCase_ ) return column def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : pa.Table ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.numpy_arrow_extractor().extract_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = self.python_features_decoder.decode_batch(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.recursive_tensorize(lowerCamelCase_ ) for column_name in batch: SCREAMING_SNAKE_CASE : Tuple = self._consolidate(batch[column_name] ) return batch
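# Illustrative sketch (assumed usage, not from the file above): the behaviour
# this formatter provides through `with_format("torch")`. Integer columns come
# back as int64 tensors and equally shaped rows are stacked into one tensor.
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]}).with_format("torch")
batch = ds[:2]
print(batch["x"].shape, batch["x"].dtype)  # torch.Size([2, 2]) torch.int64
print(batch["y"])                          # tensor([0, 1])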
'''simple docstring''' __UpperCAmelCase = [0, 2, 4, 6, 8] __UpperCAmelCase = [1, 3, 5, 7, 9] def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 SCREAMING_SNAKE_CASE : Tuple = 0 for digit in range(10 ): SCREAMING_SNAKE_CASE : Union[str, Any] = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , lowerCamelCase_ , lowerCamelCase_ ) return result SCREAMING_SNAKE_CASE : Optional[Any] = 0 for digita in range(10 ): SCREAMING_SNAKE_CASE : Union[str, Any] = digita if (remainder + digita) % 2 == 0: SCREAMING_SNAKE_CASE : Dict = ODD_DIGITS else: SCREAMING_SNAKE_CASE : Any = EVEN_DIGITS for digita in other_parity_digits: SCREAMING_SNAKE_CASE : Union[str, Any] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , lowerCamelCase_ , lowerCamelCase_ , ) return result def __A ( lowerCamelCase_ = 9 ): """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(lowerCamelCase_ , 0 , [0] * length , lowerCamelCase_ ) return result if __name__ == "__main__": print(f'''{solution() = }''')
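# Illustrative cross-check (added, not part of the original file): a direct
# brute-force count of reversible numbers below 1000, matching the Project
# Euler 145 statement that there are exactly 120 of them. The helper name
# is_reversible is an assumption, independent of the recursive counting above.
def is_reversible(n):
    if n % 10 == 0:  # reversing would produce a leading zero
        return False
    return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))


assert sum(is_reversible(n) for n in range(1, 1000)) == 120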
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __UpperCAmelCase = random.Random() def __A ( lowerCamelCase_ , lowerCamelCase_=1.0 , lowerCamelCase_=None , lowerCamelCase_=None ): """simple docstring""" if rng is None: SCREAMING_SNAKE_CASE : Optional[Any] = global_rng SCREAMING_SNAKE_CASE : Optional[int] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[int]=4_00 , lowerCamelCase_ : int=20_00 , lowerCamelCase_ : List[str]=20_48 , lowerCamelCase_ : Optional[Any]=1_28 , lowerCamelCase_ : Optional[Any]=1 , lowerCamelCase_ : str=5_12 , lowerCamelCase_ : Dict=30 , lowerCamelCase_ : Dict=4_41_00 , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = parent SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size SCREAMING_SNAKE_CASE : List[str] = min_seq_length SCREAMING_SNAKE_CASE : Any = max_seq_length SCREAMING_SNAKE_CASE : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE : int = spectrogram_length SCREAMING_SNAKE_CASE : List[Any] = feature_size SCREAMING_SNAKE_CASE : Any = num_audio_channels SCREAMING_SNAKE_CASE : Tuple = hop_length SCREAMING_SNAKE_CASE : str = chunk_length SCREAMING_SNAKE_CASE : Dict = sampling_rate def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Tuple=False , lowerCamelCase_ : Any=False ): '''simple docstring''' def _flatten(lowerCamelCase_ : Dict ): return list(itertools.chain(*lowerCamelCase_ ) ) if equal_length: SCREAMING_SNAKE_CASE : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE : Dict = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(lowerCamelCase_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = TvltFeatureExtractor def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = TvltFeatureExtractionTester(self ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """feature_size""" ) ) 
self.assertTrue(hasattr(lowerCamelCase_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """hop_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """chunk_length""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """sampling_rate""" ) ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Any = feat_extract_first.save_pretrained(lowerCamelCase_ )[0] check_json_file_has_correct_format(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_extraction_class.from_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : List[Any] = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : Optional[int] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Union[str, Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : int = os.path.join(lowerCamelCase_ , """feat_extract.json""" ) feat_extract_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class.from_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = feat_extract_first.to_dict() SCREAMING_SNAKE_CASE : int = feat_extract_second.to_dict() SCREAMING_SNAKE_CASE : List[str] = dict_first.pop("""mel_filters""" ) SCREAMING_SNAKE_CASE : Optional[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ ) ) self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] SCREAMING_SNAKE_CASE : int = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking SCREAMING_SNAKE_CASE : List[str] = feature_extractor( lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=lowerCamelCase_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) 
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE : Optional[int] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] SCREAMING_SNAKE_CASE : int = np.asarray(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = feature_extractor(lowerCamelCase_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def lowerCamelCase_ ( self : Any , lowerCamelCase_ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""" ).select(range(lowerCamelCase_ ) )[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE : Tuple = TvltFeatureExtractor() SCREAMING_SNAKE_CASE : List[Any] = feature_extractor(lowerCamelCase_ , return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowerCamelCase_ , atol=1e-4 ) )
'''simple docstring'''

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
    """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}


class UpperCamelCase__(PretrainedConfig):
    """simple docstring"""

    model_type = """markuplm"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
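# Illustrative sketch (assumed usage, not from the file above) of the
# configuration class via the installed transformers library; the sizes below
# are deliberately small and are not the defaults shown in __init__.
from transformers import MarkupLMConfig, MarkupLMModel

config = MarkupLMConfig(
    hidden_size=128,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=256,
)
model = MarkupLMModel(config)  # randomly initialised, config-driven
print(model.config.xpath_unit_hidden_size)  # MarkupLM-specific extra property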
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __UpperCAmelCase = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''

from math import log

from scipy.constants import Boltzmann, physical_constants

T = 300  # TEMPERATURE (unit = K)


def __A(donor_conc, acceptor_conc, intrinsic_conc):
    """simple docstring"""
    if donor_conc <= 0:
        raise ValueError("""Donor concentration should be positive""")
    elif acceptor_conc <= 0:
        raise ValueError("""Acceptor concentration should be positive""")
    elif intrinsic_conc <= 0:
        raise ValueError("""Intrinsic concentration should be positive""")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            """Donor concentration should be greater than intrinsic concentration"""
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            """Acceptor concentration should be greater than intrinsic concentration"""
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
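# Worked example (added, not part of the original file), calling the function
# defined above with roughly silicon-like values (concentrations in cm^-3):
# with Nd = Na = 1e17 and ni = 1e10, kT/q ≈ 0.0259 V at 300 K and
# ln(Nd * Na / ni**2) = ln(1e14) ≈ 32.2, giving a built-in potential of ~0.83 V.
print(__A(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1e10))  # ≈ 0.83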
'''simple docstring'''

__all__ = [
    """Audio""",
    """Array2D""",
    """Array3D""",
    """Array4D""",
    """Array5D""",
    """ClassLabel""",
    """Features""",
    """Sequence""",
    """Value""",
    """Image""",
    """Translation""",
    """TranslationVariableLanguages""",
]

from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
'''simple docstring''' import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Optional[int]=7 , lowerCamelCase_ : Optional[Any]=3 , lowerCamelCase_ : Tuple=18 , lowerCamelCase_ : Optional[Any]=30 , lowerCamelCase_ : List[Any]=4_00 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Union[str, Any]=None , lowerCamelCase_ : List[Any]=True , ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = size if size is not None else {"""height""": 18, """width""": 18} SCREAMING_SNAKE_CASE : Union[str, Any] = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels SCREAMING_SNAKE_CASE : Any = image_size SCREAMING_SNAKE_CASE : Optional[Any] = min_resolution SCREAMING_SNAKE_CASE : Dict = max_resolution SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize SCREAMING_SNAKE_CASE : List[Any] = size SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8_866_443_634_033_203, 0.6_618_829_369_544_983, 0.3_891_746_401_786_804], [-0.6_042_559_146_881_104, -0.02_295_008_860_528_469, 0.5_423_797_369_003_296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class UpperCamelCase__ ( lowercase_ , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ImageGPTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = ImageGPTImageProcessingTester(self ) @property def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCamelCase_ , """clusters""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """size""" ) ) self.assertTrue(hasattr(lowerCamelCase_ , """do_normalize""" ) ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) SCREAMING_SNAKE_CASE : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class(**self.image_processor_dict ) SCREAMING_SNAKE_CASE : int = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , obj[key] ) ) 
else: self.assertEqual(obj[key] , lowerCamelCase_ ) def lowerCamelCase_ ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: SCREAMING_SNAKE_CASE : Tuple = os.path.join(lowerCamelCase_ , """image_processor.json""" ) image_processor_first.to_json_file(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processing_class.from_json_file(lowerCamelCase_ ).to_dict() SCREAMING_SNAKE_CASE : int = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = self.image_processing_class.from_pretrained(lowerCamelCase_ ).to_dict() SCREAMING_SNAKE_CASE : Tuple = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(lowerCamelCase_ , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , lowerCamelCase_ ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def lowerCamelCase_ ( self : Tuple ): '''simple docstring''' pass def __A ( ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) SCREAMING_SNAKE_CASE : int = Image.open(dataset[4]["""file"""] ) SCREAMING_SNAKE_CASE : List[str] = Image.open(dataset[5]["""file"""] ) SCREAMING_SNAKE_CASE : List[Any] = [imagea, imagea] return images @require_vision @require_torch class UpperCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : Optional[int] ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) SCREAMING_SNAKE_CASE : List[Any] = prepare_images() # test non-batched SCREAMING_SNAKE_CASE : str = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 10_24) ) SCREAMING_SNAKE_CASE : Dict = [3_06, 1_91, 1_91] self.assertEqual(encoding.input_ids[0, :3].tolist() , lowerCamelCase_ ) # test batched SCREAMING_SNAKE_CASE : Union[str, Any] = image_processing(lowerCamelCase_ , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 10_24) ) SCREAMING_SNAKE_CASE : List[Any] = [3_03, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowerCamelCase_ )
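# Illustrative sketch (assumed usage, not from the test file above) of the
# processor exercised by these tests: pixels are resized, normalised and mapped
# to the nearest colour cluster, so the output is a sequence of cluster ids
# ("input_ids") rather than pixel values. The checkpoint name is an assumption.
from PIL import Image as PILImage
from transformers import ImageGPTImageProcessor

processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
encoding = processor(PILImage.new("RGB", (64, 64)), return_tensors="pt")
print(encoding.input_ids.shape)  # torch.Size([1, 1024]) for the 32x32 grid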
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name __UpperCAmelCase = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline >>> from transformers import pipeline >>> from diffusers.utils import load_image >>> def make_hint(image, depth_estimator): ... image = depth_estimator(image)[\"depth\"] ... image = np.array(image) ... image = image[:, :, None] ... image = np.concatenate([image, image, image], axis=2) ... detected_map = torch.from_numpy(image).float() / 255.0 ... hint = detected_map.permute(2, 0, 1) ... return hint >>> depth_estimator = pipeline(\"depth-estimation\") >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior = pipe_prior.to(\"cuda\") >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16 ... ) >>> pipe = pipe.to(\"cuda\") >>> img = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/cat.png\" ... ).resize((768, 768)) >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\") >>> prompt = \"A robot, 4k photo\" >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\" >>> generator = torch.Generator(device=\"cuda\").manual_seed(43) >>> image_emb, zero_image_emb = pipe_prior( ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator ... ).to_tuple() >>> images = pipe( ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... hint=hint, ... num_inference_steps=50, ... generator=generator, ... height=768, ... width=768, ... 
).images >>> images[0].save(\"robot_cat.png\") ``` """ def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=8 ): """simple docstring""" SCREAMING_SNAKE_CASE : Optional[Any] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 SCREAMING_SNAKE_CASE : List[str] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCamelCase__ ( lowercase_ ): """simple docstring""" def __init__( self : Union[str, Any] , lowerCamelCase_ : UNetaDConditionModel , lowerCamelCase_ : DDPMScheduler , lowerCamelCase_ : VQModel , ): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase_ , scheduler=lowerCamelCase_ , movq=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE : str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Tuple , lowerCamelCase_ : int ): '''simple docstring''' if latents is None: SCREAMING_SNAKE_CASE : Tuple = randn_tensor(lowerCamelCase_ , generator=lowerCamelCase_ , device=lowerCamelCase_ , dtype=lowerCamelCase_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) SCREAMING_SNAKE_CASE : Dict = latents.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = latents * scheduler.init_noise_sigma return latents def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : Dict=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) SCREAMING_SNAKE_CASE : List[Any] = torch.device(f'''cuda:{gpu_id}''' ) SCREAMING_SNAKE_CASE : Union[str, Any] = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : Tuple=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) SCREAMING_SNAKE_CASE : Any = torch.device(f'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=lowerCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) SCREAMING_SNAKE_CASE : Union[str, Any] = None for cpu_offloaded_model in [self.unet, self.movq]: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(lowerCamelCase_ , lowerCamelCase_ , prev_module_hook=lowerCamelCase_ ) # We'll offload the last model manually. 


class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using Kandinsky 2.2 with ControlNet conditioning.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Args:
        scheduler ([`DDPMScheduler`]):
            A scheduler to be used in combination with `unet` to generate image latents.
        unet ([`UNet2DConditionModel`]):
            Conditional U-Net architecture to denoise the image embedding.
        movq ([`VQModel`]):
            MoVQ decoder to generate the image from the latents.
    """

    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: DDPMScheduler,
        movq: VQModel,
    ):
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        # Draw fresh Gaussian noise unless the caller supplied latents of the expected shape.
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
        hooks.
        """
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
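
    # Note (added commentary): `__call__` below applies classifier-free guidance by stacking
    # the negative and positive image embeddings (and the control hint) into one batch,
    # running the UNet once per step on the doubled latents, and recombining the two halves
    # as  uncond + guidance_scale * (text - uncond).  Because the Kandinsky UNet also predicts
    # a variance component, the prediction is first split into its noise and variance parts
    # before the guidance is applied, and the guided noise is re-joined with the text-branch
    # variance before the scheduler step.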
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        hint: torch.FloatTensor,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
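

# Minimal usage sketch (added for illustration; not part of the pipeline above). It only
# exercises the two pure-tensor pieces defined in this file -- the latent-resolution helper
# and the classifier-free-guidance recombination used inside the denoising loop -- so it
# runs without any model weights.
if __name__ == "__main__":
    # 1) Latent resolution for a 768x768 request with the usual MoVQ scale factor of 8.
    latent_h, latent_w = downscale_height_and_width(768, 768, scale_factor=8)
    assert (latent_h, latent_w) == (96, 96)

    # 2) Guidance recombination on dummy predictions (batch of 2 = [uncond, text]).
    guidance_scale = 4.0
    fake_noise_pred = torch.randn(2, 4, latent_h, latent_w)  # stand-in for the UNet noise output
    noise_pred_uncond, noise_pred_text = fake_noise_pred.chunk(2)
    guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    print(guided.shape)  # torch.Size([1, 4, 96, 96])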