Dataset column schema:

| column                  | dtype  | values              |
|-------------------------|--------|---------------------|
| code                    | string | lengths 86 to 54.5k |
| code_codestyle          | int64  | 0 to 371            |
| style_context           | string | lengths 87 to 49.2k |
| style_context_codestyle | int64  | 0 to 349            |
| label                   | int64  | 0 to 1              |
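Each row pairs a `code` sample with a `style_context` sample, tags each with an integer codestyle id, and carries a binary `label`. In every row visible in this preview, `label` is 1 exactly when `code_codestyle` equals `style_context_codestyle`, which suggests the label marks whether the two samples share a style. Below is a minimal sketch of how such a dataset could be loaded and that relationship checked with the `datasets` library; the repository name is a placeholder, since the preview does not name the dataset.

```python
# Minimal sketch, assuming the data is hosted on the Hugging Face Hub.
# "user/code-style-pairs" is a hypothetical repository name; the preview
# does not say where this dataset actually lives.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"])                    # code sample length and its style id
print(len(row["style_context"]), row["style_context_codestyle"])  # context sample length and its style id

# Check the apparent labeling rule against the previewed rows:
# label == 1 iff the two codestyle ids match.
for r in ds.select(range(9)):
    assert r["label"] == int(r["code_codestyle"] == r["style_context_codestyle"])
```

The preview rows follow; the bare integers from the original dump are labeled with their column names, and row boundaries are marked.

Row 1
code: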
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase=0.9_99 , _UpperCAmelCase="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(_UpperCAmelCase): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(_UpperCAmelCase): return math.exp(t * -12.0) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''') SCREAMING_SNAKE_CASE = [] for i in range(lowercase__): SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase__) / alpha_bar_fn(lowercase__) , lowercase__)) return torch.tensor(lowercase__ , dtype=torch.floataa) class _snake_case ( A__ , A__ ): _lowercase : Optional[int] = [e.name for e in KarrasDiffusionSchedulers] _lowercase : Optional[Any] = 2 @register_to_config def __init__( self , a = 1000 , a = 0.0_00_85 , a = 0.0_12 , a = "linear" , a = None , a = "epsilon" , a = "linspace" , a = 0 , ) -> Tuple: if trained_betas is not None: SCREAMING_SNAKE_CASE = torch.tensor(__A , dtype=torch.floataa) elif beta_schedule == "linear": SCREAMING_SNAKE_CASE = torch.linspace(__A , __A , __A , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. SCREAMING_SNAKE_CASE = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , __A , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule SCREAMING_SNAKE_CASE = betas_for_alpha_bar(__A) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''') SCREAMING_SNAKE_CASE = 1.0 - self.betas SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(__A , __A , __A) def SCREAMING_SNAKE_CASE__ ( self , a , a=None) -> List[str]: if schedule_timesteps is None: SCREAMING_SNAKE_CASE = self.timesteps SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter) == 0: SCREAMING_SNAKE_CASE = 1 if len(__A) > 1 else 0 else: SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(__A) else timestep SCREAMING_SNAKE_CASE = self._index_counter[timestep_int] return indices[pos].item() @property def SCREAMING_SNAKE_CASE__ ( self) -> str: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def SCREAMING_SNAKE_CASE__ ( self , a , a , ) -> torch.FloatTensor: SCREAMING_SNAKE_CASE = self.index_for_timestep(__A) if self.state_in_first_order: SCREAMING_SNAKE_CASE = self.sigmas[step_index] else: SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5) return sample def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None , ) -> Tuple: SCREAMING_SNAKE_CASE = num_inference_steps SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": SCREAMING_SNAKE_CASE = np.linspace(0 , num_train_timesteps - 1 , __A , dtype=__A)[::-1].copy() elif self.config.timestep_spacing == "leading": SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE = (np.arange(0 , __A) * step_ratio).round()[::-1].copy().astype(__A) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE = (np.arange(__A , 0 , -step_ratio)).round().copy().astype(__A) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''') SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(__A)).to(__A) SCREAMING_SNAKE_CASE = np.interp(__A , np.arange(0 , len(__A)) , __A) SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]]).astype(np.floataa) SCREAMING_SNAKE_CASE = torch.from_numpy(__A).to(device=__A) # interpolate sigmas SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp() SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) SCREAMING_SNAKE_CASE = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) if str(__A).startswith('mps'): # mps does not support float64 SCREAMING_SNAKE_CASE = torch.from_numpy(__A).to(__A , dtype=torch.floataa) else: SCREAMING_SNAKE_CASE = torch.from_numpy(__A).to(__A) # interpolate timesteps SCREAMING_SNAKE_CASE = self.sigma_to_t(__A).to(__A , dtype=timesteps.dtype) SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten() SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps]) SCREAMING_SNAKE_CASE = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter SCREAMING_SNAKE_CASE = defaultdict(__A) def SCREAMING_SNAKE_CASE__ ( self , a) -> Union[str, Any]: # get log sigma SCREAMING_SNAKE_CASE = sigma.log() # get distribution SCREAMING_SNAKE_CASE = log_sigma - self.log_sigmas[:, None] # get sigmas range SCREAMING_SNAKE_CASE = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) SCREAMING_SNAKE_CASE = low_idx + 1 SCREAMING_SNAKE_CASE = self.log_sigmas[low_idx] SCREAMING_SNAKE_CASE = self.log_sigmas[high_idx] # interpolate sigmas SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high) SCREAMING_SNAKE_CASE = w.clamp(0 , 1) # transform interpolation to time range SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx SCREAMING_SNAKE_CASE = t.view(sigma.shape) return t @property def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]: return self.sample is None def SCREAMING_SNAKE_CASE__ ( self , a , a , a , a = True , ) -> Union[SchedulerOutput, Tuple]: SCREAMING_SNAKE_CASE = self.index_for_timestep(__A) # advance index counter by 1 SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(__A) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: SCREAMING_SNAKE_CASE = self.sigmas[step_index] SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index + 1] SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1] SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''') if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat # 3. delta timestep SCREAMING_SNAKE_CASE = sigma_interpol - sigma_hat # store for 2nd order step SCREAMING_SNAKE_CASE = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep SCREAMING_SNAKE_CASE = sigma_next - sigma_hat SCREAMING_SNAKE_CASE = self.sample SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__A) def SCREAMING_SNAKE_CASE__ ( self , a , a , a , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(__A): # mps does not support float64 SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device , dtype=torch.floataa) SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device , dtype=torch.floataa) else: SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device) SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device) SCREAMING_SNAKE_CASE = [self.index_for_timestep(__A , __A) for t in timesteps] SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1) SCREAMING_SNAKE_CASE = original_samples + noise * sigma return noisy_samples def __len__( self) -> List[str]: return self.config.num_train_timesteps
code_codestyle: 137

style_context:
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function __UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s __UpperCAmelCase = 3e8 # unit of c : m * s^-1 def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if force < 0: raise ValueError("""Magnitude of force can not be negative""" ) if distance < 0: raise ValueError("""Distance can not be negative""" ) if area < 0: raise ValueError("""Area can not be negative""" ) if force == 0: lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 2_4_0 * (distance) ** 4 ) return {"force": force} elif area == 0: lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: lowerCAmelCase_ :Any = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("""One and only one argument must be 0""" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 84
label: 0

Row 2
code:
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters UpperCAmelCase__ = logging.get_logger(__name__) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ,lowercase=None ,lowercase=None ): """simple docstring""" # Recurse if needed if "." in tensor_name: _UpperCAmelCase = tensor_name.split(""".""" ) for split in splits[:-1]: _UpperCAmelCase = getattr(lowercase ,lowercase ) if new_module is None: raise ValueError(f'''{module} has no attribute {split}.''' ) _UpperCAmelCase = new_module _UpperCAmelCase = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) _UpperCAmelCase = tensor_name in module._buffers _UpperCAmelCase = getattr(lowercase ,lowercase ) if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None: raise ValueError(f'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) _UpperCAmelCase = False _UpperCAmelCase = False if is_buffer or not is_bitsandbytes_available(): _UpperCAmelCase = False _UpperCAmelCase = False else: _UpperCAmelCase = hasattr(bnb.nn ,"""Params4bit""" ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit ) _UpperCAmelCase = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams ) if is_abit or is_abit: _UpperCAmelCase = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: _UpperCAmelCase = old_value.to(lowercase ) elif isinstance(lowercase ,torch.Tensor ): _UpperCAmelCase = value.to("""cpu""" ) if value.dtype == torch.inta: _UpperCAmelCase = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse( """0.37.2""" ) if not is_abit_serializable: raise ValueError( """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """ """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" ) else: _UpperCAmelCase = torch.tensor(lowercase ,device="""cpu""" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls ,lowercase ) and fpaa_statistics is None: _UpperCAmelCase = new_value.T _UpperCAmelCase = old_value.__dict__ if is_abit: _UpperCAmelCase = bnb.nn.IntaParams(lowercase ,requires_grad=lowercase ,**lowercase ).to(lowercase ) elif is_abit: _UpperCAmelCase = bnb.nn.Paramsabit(lowercase ,requires_grad=lowercase ,**lowercase ).to(lowercase ) _UpperCAmelCase = new_value if fpaa_statistics is not None: setattr(module.weight ,"""SCB""" ,fpaa_statistics.to(lowercase ) ) else: if value is None: _UpperCAmelCase = old_value.to(lowercase ) elif isinstance(lowercase ,torch.Tensor ): _UpperCAmelCase = value.to(lowercase ) else: _UpperCAmelCase = torch.tensor(lowercase ,device=lowercase ) if is_buffer: _UpperCAmelCase = new_value else: _UpperCAmelCase = nn.Parameter(lowercase ,requires_grad=old_value.requires_grad ) _UpperCAmelCase = new_value def __UpperCAmelCase ( lowercase ,lowercase=None ,lowercase=None ,lowercase=None ,lowercase=False ): """simple docstring""" for name, module in model.named_children(): if current_key_name is None: _UpperCAmelCase = [] current_key_name.append(lowercase ) if (isinstance(lowercase ,nn.Linear ) or isinstance(lowercase ,lowercase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in """.""".join(lowercase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase ,lowercase ): _UpperCAmelCase , _UpperCAmelCase = module.weight.shape else: _UpperCAmelCase = module.in_features _UpperCAmelCase = module.out_features if quantization_config.quantization_method() == "llm_int8": _UpperCAmelCase = bnb.nn.LinearabitLt( lowercase ,lowercase ,module.bias is not None ,has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight ,threshold=quantization_config.llm_inta_threshold ,) _UpperCAmelCase = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: _UpperCAmelCase = bnb.nn.Linearabit( lowercase ,lowercase ,module.bias is not None ,quantization_config.bnb_abit_compute_dtype ,compress_statistics=quantization_config.bnb_abit_use_double_quant ,quant_type=quantization_config.bnb_abit_quant_type ,) _UpperCAmelCase = True # Store the module class in case we need to transpose the weight later _UpperCAmelCase = type(lowercase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase ) if len(list(module.children() ) ) > 0: _UpperCAmelCase , _UpperCAmelCase = _replace_with_bnb_linear( lowercase ,lowercase ,lowercase ,lowercase ,has_been_replaced=lowercase ,) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def __UpperCAmelCase ( lowercase ,lowercase=None ,lowercase=None ,lowercase=None ): """simple docstring""" _UpperCAmelCase = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert _UpperCAmelCase , _UpperCAmelCase = _replace_with_bnb_linear( lowercase ,lowercase ,lowercase ,lowercase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def __UpperCAmelCase ( *lowercase ,**lowercase ): """simple docstring""" warnings.warn( """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" ,lowercase ,) 
return replace_with_bnb_linear(*lowercase ,**lowercase ) def __UpperCAmelCase ( *lowercase ,**lowercase ): """simple docstring""" warnings.warn( """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" ,lowercase ,) return set_module_quantized_tensor_to_device(*lowercase ,**lowercase ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = deepcopy(lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() _UpperCAmelCase = find_tied_parameters(lowercase ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase ,lowercase ): _UpperCAmelCase = sum(list(tied_params.values() ) ,[] ) + list(tied_params.keys() ) else: _UpperCAmelCase = sum(lowercase ,[] ) _UpperCAmelCase = len(lowercase ) > 0 # Check if it is a base model _UpperCAmelCase = not hasattr(lowercase ,model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head _UpperCAmelCase = list(model.named_children() ) _UpperCAmelCase = [list_modules[-1][0]] # add last module together with tied weights _UpperCAmelCase = set(lowercase ) - set(lowercase ) _UpperCAmelCase = list(set(lowercase ) ) + list(lowercase ) # remove ".weight" from the keys _UpperCAmelCase = [""".weight""", """.bias"""] _UpperCAmelCase = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: _UpperCAmelCase = name.replace(lowercase ,"""""" ) filtered_module_names.append(lowercase ) return filtered_module_names
code_codestyle: 363

style_context:
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING UpperCAmelCase__ = logging.get_logger(__name__) class a ( lowerCAmelCase_ ): _snake_case : List[str] = 'upernet' def __init__( self : Tuple , __lowerCAmelCase : int=None , __lowerCAmelCase : Tuple=512 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : Tuple=[1, 2, 3, 6] , __lowerCAmelCase : Any=True , __lowerCAmelCase : Any=0.4 , __lowerCAmelCase : Union[str, Any]=384 , __lowerCAmelCase : Optional[int]=256 , __lowerCAmelCase : List[str]=1 , __lowerCAmelCase : Optional[int]=False , __lowerCAmelCase : Optional[int]=255 , **__lowerCAmelCase : Union[str, Any] , ): super().__init__(**__lowerCAmelCase ) if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) _UpperCAmelCase = CONFIG_MAPPING["""resnet"""](out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) elif isinstance(__lowerCAmelCase , __lowerCAmelCase ): _UpperCAmelCase = backbone_config.get("""model_type""" ) _UpperCAmelCase = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase = config_class.from_dict(__lowerCAmelCase ) _UpperCAmelCase = backbone_config _UpperCAmelCase = hidden_size _UpperCAmelCase = initializer_range _UpperCAmelCase = pool_scales _UpperCAmelCase = use_auxiliary_head _UpperCAmelCase = auxiliary_loss_weight _UpperCAmelCase = auxiliary_in_channels _UpperCAmelCase = auxiliary_channels _UpperCAmelCase = auxiliary_num_convs _UpperCAmelCase = auxiliary_concat_input _UpperCAmelCase = loss_ignore_index def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = copy.deepcopy(self.__dict__ ) _UpperCAmelCase = self.backbone_config.to_dict() _UpperCAmelCase = self.__class__.model_type return output
style_context_codestyle: 30
label: 0

Row 3
code:
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class UpperCAmelCase ( __A , unittest.TestCase ): '''simple docstring''' lowerCamelCase_ = BarthezTokenizer lowerCamelCase_ = BarthezTokenizerFast lowerCamelCase_ = True lowerCamelCase_ = True def lowerCAmelCase_ ( self ): """simple docstring""" super().setUp() A_ : Union[str, Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowercase ) A_ : int = tokenizer def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = '<pad>' A_ : List[str] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase ) , lowercase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase ) , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" A_ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(lowercase ) , 1_0_1_1_2_2 ) def lowerCAmelCase_ ( self ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 ) @require_torch def lowerCAmelCase_ ( self ): """simple docstring""" A_ : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] A_ : List[Any] = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2] A_ : str = self.tokenizer( lowercase , max_length=len(lowercase ) , padding=lowercase , truncation=lowercase , return_tensors='pt' ) self.assertIsInstance(lowercase , lowercase ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) A_ : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(lowercase , lowercase ) def lowerCAmelCase_ ( self ): """simple docstring""" if not self.test_rust_tokenizer: return A_ : int = self.get_tokenizer() A_ : str = self.get_rust_tokenizer() A_ : Any = 'I was born in 92000, and this is falsé.' 
A_ : Any = tokenizer.tokenize(lowercase ) A_ : Union[str, Any] = rust_tokenizer.tokenize(lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : Optional[Any] = tokenizer.encode(lowercase , add_special_tokens=lowercase ) A_ : List[Any] = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase ) self.assertListEqual(lowercase , lowercase ) A_ : Optional[Any] = self.get_rust_tokenizer() A_ : int = tokenizer.encode(lowercase ) A_ : int = rust_tokenizer.encode(lowercase ) self.assertListEqual(lowercase , lowercase ) @slow def lowerCAmelCase_ ( self ): """simple docstring""" A_ : List[Any] = {'input_ids': [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. A_ : Optional[int] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=lowercase , )
code_codestyle: 140

style_context:
import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) _UpperCAmelCase = models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="""relu""") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="""relu""")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="""relu""")) classifier.add(layers.Dense(units=1, activation="""sigmoid""")) # Compiling the CNN classifier.compile( optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') _UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) _UpperCAmelCase = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) _UpperCAmelCase = train_datagen.flow_from_directory( """dataset/training_set""", target_size=(64, 64), batch_size=32, class_mode="""binary""" ) _UpperCAmelCase = test_datagen.flow_from_directory( """dataset/test_set""", target_size=(64, 64), batch_size=32, class_mode="""binary""" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("""cnn.h5""") # Part 3 - Making new predictions _UpperCAmelCase = tf.keras.preprocessing.image.load_img( """dataset/single_prediction/image.png""", target_size=(64, 64) ) _UpperCAmelCase = tf.keras.preprocessing.image.img_to_array(test_image) _UpperCAmelCase = np.expand_dims(test_image, axis=0) _UpperCAmelCase = classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: _UpperCAmelCase = """Normal""" if result[0][0] == 1: _UpperCAmelCase = """Abnormality detected"""
style_context_codestyle: 140
label: 1

Row 4
code:
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase : Any = { """configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""], """tokenization_convbert""": ["""ConvBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Dict = ["""ConvBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Union[str, Any] = [ """CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvBertForMaskedLM""", """ConvBertForMultipleChoice""", """ConvBertForQuestionAnswering""", """ConvBertForSequenceClassification""", """ConvBertForTokenClassification""", """ConvBertLayer""", """ConvBertModel""", """ConvBertPreTrainedModel""", """load_tf_weights_in_convbert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ """TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFConvBertForMaskedLM""", """TFConvBertForMultipleChoice""", """TFConvBertForQuestionAnswering""", """TFConvBertForSequenceClassification""", """TFConvBertForTokenClassification""", """TFConvBertLayer""", """TFConvBertModel""", """TFConvBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig from .tokenization_convbert import ConvBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_convbert_fast import ConvBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) else: import sys UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
code_codestyle: 345

style_context:
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed UpperCamelCase : int = """true""" def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]: """simple docstring""" set_seed(42 ) a : List[str] = RegressionModel() a : Union[str, Any] = deepcopy(snake_case ) a : Dict = RegressionDataset(length=snake_case ) a : Dict = DataLoader(snake_case , batch_size=snake_case ) model.to(accelerator.device ) a , a : Optional[int] = accelerator.prepare(snake_case , snake_case ) return model, ddp_model, dataloader def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]: """simple docstring""" a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' ) a : Any = load_dataset('glue' , 'mrpc' , split='validation' ) def tokenize_function(snake_case : int ): a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case ) return outputs with accelerator.main_process_first(): a : Dict = dataset.map( snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , ) a : List[str] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case : Optional[Any] ): if use_longest: return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' ) return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' ) return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case ) a : List[str] = get_dataloader(snake_case , not dispatch_batches ) a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained( 'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case ) a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]: """simple docstring""" a : Dict = [] for batch in dataloader: a , a : Any = batch.values() with torch.no_grad(): a : Tuple = model(snake_case ) a , a : Dict = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) a , a : List[str] = [], [] for logit, targ in logits_and_targets: logits.append(snake_case ) targs.append(snake_case ) a , a : Any = torch.cat(snake_case ), torch.cat(snake_case ) return logits, targs def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]: """simple docstring""" a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case ) a , a : int = generate_predictions(snake_case , snake_case , snake_case ) assert ( len(snake_case ) == num_samples ), 
F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}""" def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]: """simple docstring""" a : int = evaluate.load('glue' , 'mrpc' ) a , a : Tuple = get_mrpc_setup(snake_case , snake_case ) # First do baseline a , a , a : Tuple = setup['no'] model.to(snake_case ) model.eval() for batch in dataloader: batch.to(snake_case ) with torch.inference_mode(): a : List[Any] = model(**snake_case ) a : Optional[Any] = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=snake_case , references=batch['labels'] ) a : Tuple = metric.compute() # Then do distributed a , a , a : Tuple = setup['ddp'] model.eval() for batch in dataloader: with torch.inference_mode(): a : List[str] = model(**snake_case ) a : Optional[Any] = outputs.logits.argmax(dim=-1 ) a : Optional[int] = batch['labels'] a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=snake_case , references=snake_case ) a : str = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n""" def SCREAMING_SNAKE_CASE__ ( ) -> str: """simple docstring""" a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('**Testing gather_for_metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" ) test_mrpc(snake_case , snake_case ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test torch metrics**' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case ) if accelerator.is_local_main_process: print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" ) test_torch_metrics(snake_case , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('**Test last batch is not dropped when perfectly divisible**' ) a : Optional[Any] = Accelerator() test_torch_metrics(snake_case , 512 ) accelerator.state._reset_state() def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
style_context_codestyle: 345
label: 1

Row 5
code:
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..bit import BitConfig _UpperCAmelCase : int = logging.get_logger(__name__) _UpperCAmelCase : Union[str, Any] = { """Intel/dpt-large""": """https://huggingface.co/Intel/dpt-large/resolve/main/config.json""", # See all DPT models at https://huggingface.co/models?filter=dpt } class a__ ( __A ): """simple docstring""" __UpperCamelCase : int = 'dpt' def __init__(self , __lowercase=7_68 , __lowercase=12 , __lowercase=12 , __lowercase=30_72 , __lowercase="gelu" , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.0_2 , __lowercase=1e-12 , __lowercase=3_84 , __lowercase=16 , __lowercase=3 , __lowercase=False , __lowercase=True , __lowercase=[2, 5, 8, 11] , __lowercase="project" , __lowercase=[4, 2, 1, 0.5] , __lowercase=[96, 1_92, 3_84, 7_68] , __lowercase=2_56 , __lowercase=-1 , __lowercase=False , __lowercase=True , __lowercase=0.4 , __lowercase=2_55 , __lowercase=0.1 , __lowercase=[1, 10_24, 24, 24] , __lowercase=[0, 1] , __lowercase=None , **__lowercase , ): super().__init__(**__lowercase ) __lowerCAmelCase = hidden_size __lowerCAmelCase = is_hybrid if self.is_hybrid: if backbone_config is None: logger.info('''Initializing the config with a `BiT` backbone.''' ) __lowerCAmelCase = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, } __lowerCAmelCase = BitConfig(**__lowercase ) elif isinstance(__lowercase , __lowercase ): logger.info('''Initializing the config with a `BiT` backbone.''' ) __lowerCAmelCase = BitConfig(**__lowercase ) elif isinstance(__lowercase , __lowercase ): __lowerCAmelCase = backbone_config else: raise ValueError( F"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" ) __lowerCAmelCase = backbone_featmap_shape __lowerCAmelCase = neck_ignore_stages if readout_type != "project": raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' ) else: __lowerCAmelCase = None __lowerCAmelCase = None __lowerCAmelCase = [] __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = intermediate_size __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = attention_probs_dropout_prob __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = image_size __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = qkv_bias __lowerCAmelCase = backbone_out_indices if readout_type not in ["ignore", "add", "project"]: raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' ) __lowerCAmelCase = readout_type __lowerCAmelCase = reassemble_factors __lowerCAmelCase = neck_hidden_sizes __lowerCAmelCase = fusion_hidden_size __lowerCAmelCase = head_in_index __lowerCAmelCase = use_batch_norm_in_fusion_residual # auxiliary head attributes (semantic segmentation) __lowerCAmelCase = use_auxiliary_head __lowerCAmelCase = auxiliary_loss_weight __lowerCAmelCase = semantic_loss_ignore_index __lowerCAmelCase = semantic_classifier_dropout def _snake_case (self ): __lowerCAmelCase = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: __lowerCAmelCase = self.backbone_config.to_dict() __lowerCAmelCase = self.__class__.model_type return output
code_codestyle: 174

style_context:
'''simple docstring''' def __magic_name__( lowerCamelCase, lowerCamelCase): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) __lowerCAmelCase = (boundary[1] - boundary[0]) / steps __lowerCAmelCase = boundary[0] __lowerCAmelCase = boundary[1] __lowerCAmelCase = make_points(lowerCamelCase, lowerCamelCase, lowerCamelCase) __lowerCAmelCase = 0.0 y += (h / 2.0) * f(lowerCamelCase) for i in x_i: # print(i) y += h * f(lowerCamelCase) y += (h / 2.0) * f(lowerCamelCase) return y def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase): __lowerCAmelCase = a + h while x < (b - h): yield x __lowerCAmelCase = x + h def __magic_name__( lowerCamelCase): # enter your function here __lowerCAmelCase = (x - 0) * (x - 0) return y def __magic_name__( ): __lowerCAmelCase = 0.0 # Lower bound of integration __lowerCAmelCase = 1.0 # Upper bound of integration __lowerCAmelCase = 10.0 # define number of steps or resolution __lowerCAmelCase = [a, b] # define boundary of integration __lowerCAmelCase = method_a(lowerCamelCase, lowerCamelCase) print(F"""y = {y}""") if __name__ == "__main__": main()
style_context_codestyle: 174
label: 1

Row 6
code:
'''simple docstring''' import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast __lowerCAmelCase = datasets.utils.logging.get_logger(__name__) @dataclass class UpperCAmelCase__ ( datasets.BuilderConfig ): """simple docstring""" __UpperCAmelCase : int = 1_0000 __UpperCAmelCase : Optional[List[str]] = None __UpperCAmelCase : Optional[datasets.Features] = None class UpperCAmelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" __UpperCAmelCase : List[str] = ParquetConfig def __lowercase ( self : int ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def __lowercase ( self : int ,_a : List[Any] ): '''simple docstring''' if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) _a : Tuple = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_a ,(str, list, tuple) ): _a : int = data_files if isinstance(_a ,_a ): _a : Tuple = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _a : int = [dl_manager.iter_files(_a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] _a : Dict = [] for split_name, files in data_files.items(): if isinstance(_a ,_a ): _a : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive _a : Optional[int] = [dl_manager.iter_files(_a ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_a ): with open(_a ,'rb' ) as f: _a : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_a ) ) break splits.append(datasets.SplitGenerator(name=_a ,gen_kwargs={'files': files} ) ) return splits def __lowercase ( self : List[str] ,_a : pa.Table ): '''simple docstring''' if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example _a : List[str] = table_cast(_a ,self.info.features.arrow_schema ) return pa_table def __lowercase ( self : Any ,_a : Optional[int] ): '''simple docstring''' _a : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" ) for file_idx, file in enumerate(itertools.chain.from_iterable(_a ) ): with open(_a ,'rb' ) as f: _a : int = pq.ParquetFile(_a ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size ,columns=self.config.columns ) ): _a : Tuple = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F"""{file_idx}_{batch_idx}""", self._cast_table(_a ) except ValueError as e: logger.error(F"""Failed to read file '{file}' with error {type(_a )}: {e}""" ) raise
code_codestyle: 5

style_context:
'''simple docstring''' import functools import logging import os import sys import threading from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional import huggingface_hub.utils as hf_hub_utils from tqdm import auto as tqdm_lib __lowerCAmelCase = threading.Lock() __lowerCAmelCase = None __lowerCAmelCase = { """debug""": logging.DEBUG, """info""": logging.INFO, """warning""": logging.WARNING, """error""": logging.ERROR, """critical""": logging.CRITICAL, } __lowerCAmelCase = logging.WARNING __lowerCAmelCase = True def UpperCAmelCase_ (): """simple docstring""" _a : Dict = os.getenv('TRANSFORMERS_VERBOSITY' , __a ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """ f"""has to be one of: { ', '.join(log_levels.keys() ) }""" ) return _default_log_level def UpperCAmelCase_ (): """simple docstring""" return __name__.split('.' )[0] def UpperCAmelCase_ (): """simple docstring""" return logging.getLogger(_get_library_name() ) def UpperCAmelCase_ (): """simple docstring""" global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _a : str = logging.StreamHandler() # Set sys.stderr as stream. _a : Optional[Any] = sys.stderr.flush # Apply our default configuration to the library root logger. _a : List[Any] = _get_library_root_logger() library_root_logger.addHandler(_default_handler ) library_root_logger.setLevel(_get_default_logging_level() ) _a : List[str] = False def UpperCAmelCase_ (): """simple docstring""" global _default_handler with _lock: if not _default_handler: return _a : int = _get_library_root_logger() library_root_logger.removeHandler(_default_handler ) library_root_logger.setLevel(logging.NOTSET ) _a : str = None def UpperCAmelCase_ (): """simple docstring""" return log_levels def UpperCAmelCase_ (__a : Optional[str] = None ): """simple docstring""" if name is None: _a : List[Any] = _get_library_name() _configure_library_root_logger() return logging.getLogger(__a ) def UpperCAmelCase_ (): """simple docstring""" _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def UpperCAmelCase_ (__a : int ): """simple docstring""" _configure_library_root_logger() _get_library_root_logger().setLevel(__a ) def UpperCAmelCase_ (): """simple docstring""" return set_verbosity(__a ) def UpperCAmelCase_ (): """simple docstring""" return set_verbosity(__a ) def UpperCAmelCase_ (): """simple docstring""" return set_verbosity(__a ) def UpperCAmelCase_ (): """simple docstring""" return set_verbosity(__a ) def UpperCAmelCase_ (): """simple docstring""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler ) def UpperCAmelCase_ (): """simple docstring""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler ) def UpperCAmelCase_ (__a : logging.Handler ): """simple docstring""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(__a ) def UpperCAmelCase_ (__a : logging.Handler ): """simple docstring""" _configure_library_root_logger() assert handler is not None and handler not in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(__a ) def UpperCAmelCase_ (): 
"""simple docstring""" _configure_library_root_logger() _a : Union[str, Any] = False def UpperCAmelCase_ (): """simple docstring""" _configure_library_root_logger() _a : Dict = True def UpperCAmelCase_ (): """simple docstring""" _a : Any = _get_library_root_logger().handlers for handler in handlers: _a : Union[str, Any] = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s' ) handler.setFormatter(__a ) def UpperCAmelCase_ (): """simple docstring""" _a : Union[str, Any] = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(__a ) def UpperCAmelCase_ (self : Union[str, Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ): """simple docstring""" _a : Union[str, Any] = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , __a ) if no_advisory_warnings: return self.warning(*__a , **__a ) __lowerCAmelCase = warning_advice @functools.lru_cache(__a ) def UpperCAmelCase_ (self : int , *__a : Optional[Any] , **__a : Any ): """simple docstring""" self.warning(*__a , **__a ) __lowerCAmelCase = warning_once class UpperCAmelCase__ : """simple docstring""" def __init__( self : Any ,*_a : Tuple ,**_a : int ): # pylint: disable=unused-argument '''simple docstring''' _a : int = args[0] if args else None def __iter__( self : str ): '''simple docstring''' return iter(self._iterator ) def __getattr__( self : List[Any] ,_a : int ): '''simple docstring''' def empty_fn(*_a : Optional[Any] ,**_a : Any ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : List[str] ): '''simple docstring''' return self def __exit__( self : List[str] ,_a : str ,_a : List[Any] ,_a : str ): '''simple docstring''' return class UpperCAmelCase__ : """simple docstring""" def __call__( self : Union[str, Any] ,*_a : Tuple ,**_a : Tuple ): '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm(*_a ,**_a ) else: return EmptyTqdm(*_a ,**_a ) def __lowercase ( self : str ,*_a : List[Any] ,**_a : Any ): '''simple docstring''' _a : Any = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*_a ,**_a ) def __lowercase ( self : List[str] ): '''simple docstring''' if _tqdm_active: return tqdm_lib.tqdm.get_lock() __lowerCAmelCase = _tqdm_cls() def UpperCAmelCase_ (): """simple docstring""" global _tqdm_active return bool(_tqdm_active ) def UpperCAmelCase_ (): """simple docstring""" global _tqdm_active _a : str = True hf_hub_utils.enable_progress_bars() def UpperCAmelCase_ (): """simple docstring""" global _tqdm_active _a : Dict = False hf_hub_utils.disable_progress_bars()
style_context_codestyle: 5
label: 1

Row 7
code:
import os def SCREAMING_SNAKE_CASE ( _UpperCAmelCase = "matrix.txt" ) -> int: with open(os.path.join(os.path.dirname(_UpperCAmelCase ) , _UpperCAmelCase ) ) as in_file: lowerCamelCase__ : int = in_file.read() lowerCamelCase__ : Tuple = [[int(_UpperCAmelCase ) for cell in row.split(',' )] for row in data.strip().splitlines()] lowerCamelCase__ : Any = [[0 for cell in row] for row in grid] lowerCamelCase__ : Optional[int] = len(grid[0] ) lowerCamelCase__ : Optional[Any] = [[0 for i in range(_UpperCAmelCase )] for j in range(_UpperCAmelCase )] lowerCamelCase__ : Any = grid[0][0] for i in range(1 , _UpperCAmelCase ): lowerCamelCase__ : Any = grid[0][i] + dp[0][i - 1] for i in range(1 , _UpperCAmelCase ): lowerCamelCase__ : Optional[int] = grid[i][0] + dp[i - 1][0] for i in range(1 , _UpperCAmelCase ): for j in range(1 , _UpperCAmelCase ): lowerCamelCase__ : List[Any] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F"""{solution() = }""")
code_codestyle: 50

style_context:
import flax.linen as nn import jax import jax.numpy as jnp class lowerCAmelCase ( nn.Module ): UpperCAmelCase__ = 42 UpperCAmelCase__ = jnp.floataa def A_ ( self : Any ) -> Any: lowerCamelCase__ : str = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : int , UpperCAmelCase : Dict ) -> Optional[Any]: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = hidden_states.shape lowerCamelCase__ : Union[str, Any] = jax.image.resize( UpperCAmelCase , shape=(batch, height * 2, width * 2, channels) , method='nearest' , ) lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase ) return hidden_states class lowerCAmelCase ( nn.Module ): UpperCAmelCase__ = 42 UpperCAmelCase__ = jnp.floataa def A_ ( self : List[str] ) -> int: lowerCamelCase__ : Tuple = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : str , UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim # hidden_states = jnp.pad(hidden_states, pad_width=pad) lowerCamelCase__ : Optional[Any] = self.conv(UpperCAmelCase ) return hidden_states class lowerCAmelCase ( nn.Module ): UpperCAmelCase__ = 42 UpperCAmelCase__ = None UpperCAmelCase__ = 0.0 UpperCAmelCase__ = None UpperCAmelCase__ = jnp.floataa def A_ ( self : List[str] ) -> Union[str, Any]: lowerCamelCase__ : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels lowerCamelCase__ : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) lowerCamelCase__ : int = nn.Conv( UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCamelCase__ : Union[str, Any] = nn.Dense(UpperCAmelCase , dtype=self.dtype ) lowerCamelCase__ : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) lowerCamelCase__ : List[Any] = nn.Dropout(self.dropout_prob ) lowerCamelCase__ : Tuple = nn.Conv( UpperCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) lowerCamelCase__ : Optional[Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut lowerCamelCase__ : Union[str, Any] = None if use_nin_shortcut: lowerCamelCase__ : Dict = nn.Conv( UpperCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , ) def __call__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int=True ) -> Optional[int]: lowerCamelCase__ : Union[str, Any] = hidden_states lowerCamelCase__ : List[Any] = self.norma(UpperCAmelCase ) lowerCamelCase__ : List[Any] = nn.swish(UpperCAmelCase ) lowerCamelCase__ : Any = self.conva(UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = self.time_emb_proj(nn.swish(UpperCAmelCase ) ) lowerCamelCase__ : List[str] = jnp.expand_dims(jnp.expand_dims(UpperCAmelCase , 1 ) , 1 ) lowerCamelCase__ : List[str] = hidden_states + temb lowerCamelCase__ : Optional[Any] = self.norma(UpperCAmelCase ) lowerCamelCase__ : List[str] = nn.swish(UpperCAmelCase ) lowerCamelCase__ : Optional[int] = self.dropout(UpperCAmelCase , UpperCAmelCase ) lowerCamelCase__ : str = self.conva(UpperCAmelCase ) if self.conv_shortcut is not None: lowerCamelCase__ : Dict = self.conv_shortcut(UpperCAmelCase ) return hidden_states + residual
style_context_codestyle: 50
label: 1

Row 8
code:
from __future__ import annotations import math from collections.abc import Callable def __UpperCAmelCase ( __a : Callable[[int | float], int | float] ,__a : int | float ,__a : int | float ,__a : int = 100 ,) -> float: """simple docstring""" _a : Tuple = x_start _a : List[str] = fnc(__a ) _a : Optional[int] = 0.0 for _ in range(__a ): # Approximates curve as a sequence of linear lines and sums their length _a : Dict = (x_end - x_start) / steps + xa _a : Dict = fnc(__a ) length += math.hypot(xa - xa ,fxa - fxa ) # Increment step _a : Optional[Any] = xa _a : Any = fxa return length if __name__ == "__main__": def __UpperCAmelCase ( __a : Any ) -> Dict: """simple docstring""" return math.sin(10 * x ) print('''f(x) = sin(10 * x)''') print('''The length of the curve from x = -10 to x = 10 is:''') a__ = 10 while i <= 100000: print(f'''With {i} steps: {line_length(f, -10, 10, i)}''') i *= 10
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
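# --- Illustrative usage of the builder above (added sketch, assuming this file
# is the `datasets` packaged "pandas" module; "train.pkl" is a placeholder path
# to a pickled pandas.DataFrame):
#
#     from datasets import load_dataset
#     ds = load_dataset("pandas", data_files={"train": "train.pkl"})
#     print(ds["train"].features)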
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)"  # country code or leading zero(s)
        r"7(0|1|2|4|5|6|7|8)"  # mobile network prefix
        r"(-| |)"  # optional separator
        r"\d{7}$"  # subscriber number
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
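if __name__ == "__main__":
    # Added sanity checks for the validator above; the numbers are made up to
    # match or violate the pattern, not real phone numbers.
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0718382399")
    assert not is_sri_lankan_phone_number("12345")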
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowercase = { """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""], """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""BertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BertForMaskedLM""", """BertForMultipleChoice""", """BertForNextSentencePrediction""", """BertForPreTraining""", """BertForQuestionAnswering""", """BertForSequenceClassification""", """BertForTokenClassification""", """BertLayer""", """BertLMHeadModel""", """BertModel""", """BertPreTrainedModel""", """load_tf_weights_in_bert""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBertEmbeddings""", """TFBertForMaskedLM""", """TFBertForMultipleChoice""", """TFBertForNextSentencePrediction""", """TFBertForPreTraining""", """TFBertForQuestionAnswering""", """TFBertForSequenceClassification""", """TFBertForTokenClassification""", """TFBertLMHeadModel""", """TFBertMainLayer""", """TFBertModel""", """TFBertPreTrainedModel""", ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = ["""TFBertTokenizer"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """FlaxBertForCausalLM""", """FlaxBertForMaskedLM""", """FlaxBertForMultipleChoice""", """FlaxBertForNextSentencePrediction""", """FlaxBertForPreTraining""", """FlaxBertForQuestionAnswering""", """FlaxBertForSequenceClassification""", """FlaxBertForTokenClassification""", """FlaxBertModel""", """FlaxBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, 
TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
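# --- Added illustration (not part of the original file): with the lazy module
# installed in sys.modules, attributes resolve on first access; assumes
# transformers and torch are available.
#
#     from transformers.models.bert import BertConfig  # triggers the lazy import
#     config = BertConfig()
#     print(config.hidden_size)  # 768 for the default BERT-base configuration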
'''simple docstring''' from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class lowerCAmelCase__ ( a ): """simple docstring""" lowerCAmelCase__ = 42 class lowerCAmelCase__ ( a , a ): """simple docstring""" lowerCAmelCase__ = True @register_to_config def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : int = 3 , __SCREAMING_SNAKE_CASE : Tuple[str] = ("DownEncoderBlock2D",) , __SCREAMING_SNAKE_CASE : Tuple[str] = ("UpDecoderBlock2D",) , __SCREAMING_SNAKE_CASE : Tuple[int] = (64,) , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : str = "silu" , __SCREAMING_SNAKE_CASE : int = 4 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : int = 32 , __SCREAMING_SNAKE_CASE : float = 0.18215 , ) -> List[Any]: """simple docstring""" super().__init__() # pass init params to Encoder __SCREAMING_SNAKE_CASE = Encoder( in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , down_block_types=__SCREAMING_SNAKE_CASE , block_out_channels=__SCREAMING_SNAKE_CASE , layers_per_block=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , double_z=__SCREAMING_SNAKE_CASE , ) # pass init params to Decoder __SCREAMING_SNAKE_CASE = Decoder( in_channels=__SCREAMING_SNAKE_CASE , out_channels=__SCREAMING_SNAKE_CASE , up_block_types=__SCREAMING_SNAKE_CASE , block_out_channels=__SCREAMING_SNAKE_CASE , layers_per_block=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , act_fn=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) __SCREAMING_SNAKE_CASE = nn.Convad(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False # only relevant if vae tiling is enabled __SCREAMING_SNAKE_CASE = self.config.sample_size __SCREAMING_SNAKE_CASE = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) __SCREAMING_SNAKE_CASE = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) __SCREAMING_SNAKE_CASE = 0.25 def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int=False ) -> List[Any]: """simple docstring""" if isinstance(__SCREAMING_SNAKE_CASE , (Encoder, Decoder) ): __SCREAMING_SNAKE_CASE = value def UpperCAmelCase__ ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : bool = True ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = use_tiling def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" self.enable_tiling(__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = True def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __SCREAMING_SNAKE_CASE = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def UpperCAmelCase__ ( self : Dict ) -> Dict[str, AttentionProcessor]: """simple docstring""" __SCREAMING_SNAKE_CASE = {} def fn_recursive_add_processors(__SCREAMING_SNAKE_CASE : str , 
__SCREAMING_SNAKE_CASE : torch.nn.Module , __SCREAMING_SNAKE_CASE : Dict[str, AttentionProcessor] ): if hasattr(__SCREAMING_SNAKE_CASE , """set_processor""" ): __SCREAMING_SNAKE_CASE = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return processors def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE = len(self.attn_processors.keys() ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) != count: raise ValueError( f'A dict of processors was passed, but the number of processors {len(__SCREAMING_SNAKE_CASE )} does not match the' f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' ) def fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : torch.nn.Module , __SCREAMING_SNAKE_CASE : Optional[Any] ): if hasattr(__SCREAMING_SNAKE_CASE , """set_processor""" ): if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): module.set_processor(__SCREAMING_SNAKE_CASE ) else: module.set_processor(processor.pop(f'{name}.processor' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for name, module in self.named_children(): fn_recursive_attn_processor(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> AutoencoderKLOutput: """simple docstring""" if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE ) if self.use_slicing and x.shape[0] > 1: __SCREAMING_SNAKE_CASE = [self.encoder(__SCREAMING_SNAKE_CASE ) for x_slice in x.split(1 )] __SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = self.encoder(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.quant_conv(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = DiagonalGaussianDistribution(__SCREAMING_SNAKE_CASE ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[Any] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.post_quant_conv(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.decoder(__SCREAMING_SNAKE_CASE ) if not return_dict: return (dec,) return DecoderOutput(sample=__SCREAMING_SNAKE_CASE ) @apply_forward_hook def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> 
Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" if self.use_slicing and z.shape[0] > 1: __SCREAMING_SNAKE_CASE = [self._decode(__SCREAMING_SNAKE_CASE ).sample for z_slice in z.split(1 )] __SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = self._decode(__SCREAMING_SNAKE_CASE ).sample if not return_dict: return (decoded,) return DecoderOutput(sample=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = min(a.shape[2] , b.shape[2] , __SCREAMING_SNAKE_CASE ) for y in range(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] ) -> List[str]: """simple docstring""" __SCREAMING_SNAKE_CASE = min(a.shape[3] , b.shape[3] , __SCREAMING_SNAKE_CASE ) for x in range(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def UpperCAmelCase__ ( self : Tuple , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> AutoencoderKLOutput: """simple docstring""" __SCREAMING_SNAKE_CASE = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) __SCREAMING_SNAKE_CASE = int(self.tile_latent_min_size * self.tile_overlap_factor ) __SCREAMING_SNAKE_CASE = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. __SCREAMING_SNAKE_CASE = [] for i in range(0 , x.shape[2] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [] for j in range(0 , x.shape[3] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] __SCREAMING_SNAKE_CASE = self.encoder(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.quant_conv(__SCREAMING_SNAKE_CASE ) row.append(__SCREAMING_SNAKE_CASE ) rows.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [] for i, row in enumerate(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [] for j, tile in enumerate(__SCREAMING_SNAKE_CASE ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __SCREAMING_SNAKE_CASE = self.blend_v(rows[i - 1][j] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if j > 0: __SCREAMING_SNAKE_CASE = self.blend_h(row[j - 1] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=3 ) ) __SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE , dim=2 ) __SCREAMING_SNAKE_CASE = DiagonalGaussianDistribution(__SCREAMING_SNAKE_CASE ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" __SCREAMING_SNAKE_CASE = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) __SCREAMING_SNAKE_CASE = int(self.tile_sample_min_size * self.tile_overlap_factor ) __SCREAMING_SNAKE_CASE = self.tile_sample_min_size - blend_extent # Split z into 
overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. __SCREAMING_SNAKE_CASE = [] for i in range(0 , z.shape[2] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [] for j in range(0 , z.shape[3] , __SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] __SCREAMING_SNAKE_CASE = self.post_quant_conv(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = self.decoder(__SCREAMING_SNAKE_CASE ) row.append(__SCREAMING_SNAKE_CASE ) rows.append(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE = [] for i, row in enumerate(__SCREAMING_SNAKE_CASE ): __SCREAMING_SNAKE_CASE = [] for j, tile in enumerate(__SCREAMING_SNAKE_CASE ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: __SCREAMING_SNAKE_CASE = self.blend_v(rows[i - 1][j] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if j > 0: __SCREAMING_SNAKE_CASE = self.blend_h(row[j - 1] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=3 ) ) __SCREAMING_SNAKE_CASE = torch.cat(__SCREAMING_SNAKE_CASE , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : torch.FloatTensor , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , ) -> Union[DecoderOutput, torch.FloatTensor]: """simple docstring""" __SCREAMING_SNAKE_CASE = sample __SCREAMING_SNAKE_CASE = self.encode(__SCREAMING_SNAKE_CASE ).latent_dist if sample_posterior: __SCREAMING_SNAKE_CASE = posterior.sample(generator=__SCREAMING_SNAKE_CASE ) else: __SCREAMING_SNAKE_CASE = posterior.mode() __SCREAMING_SNAKE_CASE = self.decode(__SCREAMING_SNAKE_CASE ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__SCREAMING_SNAKE_CASE )
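# --- Added usage sketch (not part of the original file), assuming the class
# above corresponds to diffusers' AutoencoderKL with its default constructor
# arguments:
#
#     import torch
#     vae = AutoencoderKL()
#     x = torch.randn(1, 3, 32, 32)
#     posterior = vae.encode(x).latent_dist
#     z = posterior.sample()
#     reconstruction = vae.decode(z).sample
#
# Tiling (`vae.enable_tiling()`) only takes effect for inputs larger than
# `tile_sample_min_size`, where encode/decode fall back to the tiled paths above.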
import os


def solution(filename: str = "input.txt") -> int:
    """Returns the minimal path sum for Project Euler problem 82: traverse the
    matrix from the left column to the right column, moving up, down and right."""
    with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")] for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        # first assume we only moved right into column j
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        # then relax with downward moves ...
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        # ... and upward moves within the same column
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring""" import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class lowerCAmelCase__ ( lowercase ): '''simple docstring''' def __init__( self , lowercase , lowercase , lowercase=1024 , lowercase=1024 , lowercase=3.6 ): _lowerCamelCase : Union[str, Any] = tokenizer _lowerCamelCase : Any = tokenizer.bos_token_id _lowerCamelCase : Optional[int] = dataset _lowerCamelCase : Optional[int] = seq_length _lowerCamelCase : Union[str, Any] = seq_length * chars_per_token * num_of_sequences def __iter__( self ): _lowerCamelCase : Optional[int] = iter(self.dataset ) _lowerCamelCase : Dict = True while more_examples: _lowerCamelCase, _lowerCamelCase : Dict = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowercase )['content'] ) buffer_len += len(buffer[-1] ) except StopIteration: _lowerCamelCase : Optional[Any] = False break _lowerCamelCase : Optional[Any] = tokenizer(lowercase , truncation=lowercase )['input_ids'] _lowerCamelCase : Any = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(lowercase ) , self.seq_length ): _lowerCamelCase : List[str] = all_token_ids[i : i + self.seq_length] if len(lowercase ) == self.seq_length: yield torch.tensor(lowercase ) def _snake_case ( lowercase__ ): _lowerCamelCase : List[Any] = {'streaming': True} _lowerCamelCase : str = load_dataset(args.dataset_name , split='train' , **lowercase__ ) _lowerCamelCase : Dict = ConstantLengthDataset(lowercase__ , lowercase__ , seq_length=args.seq_length ) _lowerCamelCase : str = DataLoader(lowercase__ , batch_size=args.batch_size ) return eval_dataloader def _snake_case ( lowercase__ ): model.eval() _lowerCamelCase : Tuple = [] for step, batch in enumerate(lowercase__ ): with torch.no_grad(): _lowerCamelCase : str = model(lowercase__ , labels=lowercase__ ) _lowerCamelCase : Optional[Any] = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(lowercase__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break _lowerCamelCase : str = torch.mean(torch.cat(lowercase__ ) ) try: _lowerCamelCase : Any = torch.exp(lowercase__ ) except OverflowError: _lowerCamelCase : int = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator lowercase__ = Accelerator() # Parse configuration lowercase__ = HfArgumentParser(EvaluationArguments) lowercase__ = parser.parse_args() set_seed(args.seed) # Logging lowercase__ = logging.getLogger(__name__) logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) # Load model and tokenizer lowercase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt) lowercase__ = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader lowercase__ = create_dataloader(args) # Prepare everything with our `accelerator`. lowercase__ , lowercase__ = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("""Evaluating and saving model after training""") lowercase__ , lowercase__ = evaluate(args) logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowercase__ = {"""configuration_ibert""": ["""IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """IBertConfig""", """IBertOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ = [ """IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """IBertForMaskedLM""", """IBertForMultipleChoice""", """IBertForQuestionAnswering""", """IBertForSequenceClassification""", """IBertForTokenClassification""", """IBertModel""", """IBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, IBertPreTrainedModel, ) else: import sys lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase__ ( __snake_case , unittest.TestCase): UpperCamelCase_ = VideoToVideoSDPipeline UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""}) - {'image', 'width', 'height'} UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""}) - {'image'} UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {'latents'} UpperCamelCase_ = False # No `output_type`. UpperCamelCase_ = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def __A ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , ) SCREAMING_SNAKE_CASE : Any = DDIMScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) SCREAMING_SNAKE_CASE : Optional[int] = CLIPTextModel(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) SCREAMING_SNAKE_CASE : int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def __A ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) if str(UpperCamelCase__ ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Dict = { '''prompt''': '''A painting of a squirrel eating a burger''', '''video''': video, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, 
'''output_type''': '''pt''', } return inputs def __A ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : Any = VideoToVideoSDPipeline(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(UpperCamelCase__ ) sd_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = '''np''' SCREAMING_SNAKE_CASE : str = sd_pipe(**UpperCamelCase__ ).frames SCREAMING_SNAKE_CASE : Union[str, Any] = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) SCREAMING_SNAKE_CASE : Dict = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __A ( self : List[str] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase__ , expected_max_diff=5E-3 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def __A ( self : Any ): '''simple docstring''' pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def __A ( self : str ): '''simple docstring''' pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def __A ( self : Optional[Any] ): '''simple docstring''' pass def __A ( self : Union[str, Any] ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class lowercase__ ( unittest.TestCase): def __A ( self : int ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = torch.randn((1, 10, 3, 1024, 576) , generator=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = video.to('''cuda''' ) SCREAMING_SNAKE_CASE : Tuple = '''Spiderman is surfing''' SCREAMING_SNAKE_CASE : Dict = pipe(UpperCamelCase__ , video=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=3 , output_type='''pt''' ).frames SCREAMING_SNAKE_CASE : Optional[Any] = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# The left-hand-side parameter targets below follow the standard
# s3prl -> transformers conversion layout.
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
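# --- Example invocation (added; the script filename, model name and paths are
# placeholders):
#
#     python convert_s3prl_checkpoint.py \
#         --base_model_name microsoft/unispeech-sat-base \
#         --config_path ./config.json \
#         --checkpoint_path ./s3prl_checkpoint.ckpt \
#         --model_dump_path ./converted_model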
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = BertJapaneseTokenizer SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : str = True def snake_case__( self : str ) ->Tuple: super().setUp() snake_case_ = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは''', '''世界''', '''##世界''', '''、''', '''##、''', '''。''', '''##。''', ] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def snake_case__( self : Optional[Any] , _UpperCamelCase : List[Any] ) ->List[str]: snake_case_ = '''こんにちは、世界。 \nこんばんは、世界。''' snake_case_ = '''こんにちは 、 世界 。 こんばんは 、 世界 。''' return input_text, output_text def snake_case__( self : Optional[Any] , _UpperCamelCase : Dict ) ->Tuple: snake_case_, snake_case_ = self.get_input_output_texts(_UpperCamelCase ) snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) return text, ids def snake_case__( self : Any ) ->Dict: pass # TODO add if relevant def snake_case__( self : Optional[Any] ) ->Optional[Any]: pass # TODO add if relevant def snake_case__( self : Optional[Any] ) ->Any: pass # TODO add if relevant def snake_case__( self : Optional[int] ) ->int: snake_case_ = self.tokenizer_class(self.vocab_file ) snake_case_ = tokenizer.tokenize('''こんにちは、世界。\nこんばんは、世界。''' ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) def snake_case__( self : Dict ) ->Any: snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''mecab''' ) self.assertIsNotNone(_UpperCamelCase ) snake_case_ = '''こんにちは、世界。\nこんばんは、世界。''' snake_case_ = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(_UpperCamelCase , '''wb''' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , '''rb''' ) as handle: snake_case_ = pickle.load(_UpperCamelCase ) snake_case_ = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) def snake_case__( self : List[Any] ) ->Tuple: snake_case_ = MecabTokenizer(mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', 
'''た''', '''。'''] , ) def snake_case__( self : int ) ->List[Any]: try: snake_case_ = MecabTokenizer(mecab_dic='''unidic_lite''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def snake_case__( self : Union[str, Any] ) ->str: try: snake_case_ = MecabTokenizer(mecab_dic='''unidic''' ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def snake_case__( self : List[str] ) ->Dict: snake_case_ = MecabTokenizer(do_lower_case=_UpperCamelCase , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iphone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) def snake_case__( self : Optional[int] ) ->List[str]: try: snake_case_ = MecabTokenizer( do_lower_case=_UpperCamelCase , normalize_text=_UpperCamelCase , mecab_option='''-d /usr/local/lib/mecab/dic/jumandic''' ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. return self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) def snake_case__( self : Optional[int] ) ->Union[str, Any]: snake_case_ = MecabTokenizer(normalize_text=_UpperCamelCase , mecab_dic='''ipadic''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップルストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。'''] , ) @require_sudachi def snake_case__( self : Optional[Any] ) ->str: snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''sudachi''' ) self.assertIsNotNone(_UpperCamelCase ) snake_case_ = '''こんにちは、世界。\nこんばんは、世界。''' snake_case_ = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(_UpperCamelCase , '''wb''' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , '''rb''' ) as handle: snake_case_ = pickle.load(_UpperCamelCase ) snake_case_ = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_sudachi def snake_case__( self : Tuple ) ->Optional[int]: snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def snake_case__( self : str ) ->Tuple: snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''A''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国''', '''人''', '''参政''', '''権'''] ) @require_sudachi def snake_case__( self : Dict ) ->List[Any]: snake_case_ = 
SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''B''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人''', '''参政権'''] ) @require_sudachi def snake_case__( self : Optional[int] ) ->Tuple: snake_case_ = SudachiTokenizer(sudachi_dict_type='''core''' , sudachi_split_mode='''C''' ) self.assertListEqual(tokenizer.tokenize('''外国人参政権''' ) , ['''外国人参政権'''] ) @require_sudachi def snake_case__( self : Optional[Any] ) ->int: snake_case_ = SudachiTokenizer(do_lower_case=_UpperCamelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', ''' ''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def snake_case__( self : Dict ) ->List[str]: snake_case_ = SudachiTokenizer(normalize_text=_UpperCamelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , [''' ''', '''\t''', '''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', ''' ''', '''が''', ''' ''', ''' ''', '''\n ''', '''発売''', '''さ''', '''れ''', '''た''', '''\u3000''', '''。''', ''' ''', ''' '''] , ) @require_sudachi def snake_case__( self : List[str] ) ->List[Any]: snake_case_ = SudachiTokenizer(trim_whitespace=_UpperCamelCase , sudachi_dict_type='''core''' ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れ''', '''た''', '''。'''] , ) @require_jumanpp def snake_case__( self : int ) ->Union[str, Any]: snake_case_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type='''jumanpp''' ) self.assertIsNotNone(_UpperCamelCase ) snake_case_ = '''こんにちは、世界。\nこんばんは、世界。''' snake_case_ = tokenizer.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , ['''こんにちは''', '''、''', '''世界''', '''。''', '''こん''', '''##ばんは''', '''、''', '''世界''', '''。'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] ) snake_case_ = os.path.join(self.tmpdirname , '''tokenizer.bin''' ) with open(_UpperCamelCase , '''wb''' ) as handle: pickle.dump(_UpperCamelCase , _UpperCamelCase ) with open(_UpperCamelCase , '''rb''' ) as handle: snake_case_ = pickle.load(_UpperCamelCase ) snake_case_ = tokenizer_new.tokenize(_UpperCamelCase ) self.assertListEqual(_UpperCamelCase , _UpperCamelCase ) @require_jumanpp def snake_case__( self : List[str] ) ->Dict: snake_case_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def snake_case__( self : Any ) ->Any: snake_case_ = JumanppTokenizer(do_lower_case=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iphone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def snake_case__( self : int ) ->Dict: snake_case_ = JumanppTokenizer(normalize_text=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''ア''', '''ッ''', '''フ''', '''゚''', '''ル''', '''ストア''', '''で''', 
'''iPhone''', '''8''', '''\u3000''', '''が''', '''\u3000''', '''\u3000''', '''\u3000''', '''発売''', '''さ''', '''れた''', '''\u3000''', '''。'''] , ) @require_jumanpp def snake_case__( self : int ) ->Optional[Any]: snake_case_ = JumanppTokenizer(trim_whitespace=_UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(''' \tアップルストアでiPhone8 が \n 発売された 。 ''' ) , ['''アップル''', '''ストア''', '''で''', '''iPhone''', '''8''', '''が''', '''発売''', '''さ''', '''れた''', '''。'''] , ) @require_jumanpp def snake_case__( self : Any ) ->Optional[int]: snake_case_ = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize('''ありがとうございますm(_ _)m見つけるのが大変です。''' ) , ['''ありがとう''', '''ございます''', '''m(_ _)m''', '''見つける''', '''の''', '''が''', '''大変です''', '''。'''] , ) def snake_case__( self : Any ) ->List[Any]: snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こんにちは''', '''こん''', '''にちは''', '''ばんは''', '''##こん''', '''##にちは''', '''##ばんは'''] snake_case_ = {} for i, token in enumerate(_UpperCamelCase ): snake_case_ = i snake_case_ = WordpieceTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こんにちは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは''' ) , ['''こん''', '''##ばんは'''] ) self.assertListEqual(tokenizer.tokenize('''こんばんは こんばんにちは こんにちは''' ) , ['''こん''', '''##ばんは''', '''[UNK]''', '''こんにちは'''] ) def snake_case__( self : Optional[Any] ) ->Optional[int]: snake_case_ = BertJapaneseTokenizer.from_pretrained('''nlp-waseda/roberta-base-japanese-with-auto-jumanpp''' ) snake_case_ = tokenizer.subword_tokenizer snake_case_ = subword_tokenizer.tokenize('''国境 の 長い トンネル を 抜ける と 雪国 であった 。''' ) self.assertListEqual(_UpperCamelCase , ['''▁国境''', '''▁の''', '''▁長い''', '''▁トンネル''', '''▁を''', '''▁抜ける''', '''▁と''', '''▁雪''', '''国''', '''▁であった''', '''▁。'''] ) snake_case_ = subword_tokenizer.tokenize('''こんばんは こんばん にち は こんにちは''' ) self.assertListEqual(_UpperCamelCase , ['''▁こん''', '''ばん''', '''は''', '''▁こん''', '''ばん''', '''▁に''', '''ち''', '''▁は''', '''▁こんにちは'''] ) def snake_case__( self : str ) ->Tuple: snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese''' ) snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = BertJapaneseTokenizer SCREAMING_SNAKE_CASE : int = False def snake_case__( self : List[str] ) ->int: super().setUp() snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) def snake_case__( self : Optional[Any] , **_UpperCamelCase : Union[str, Any] ) ->int: return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='''character''' , **_UpperCamelCase ) def snake_case__( self : Any , _UpperCamelCase : 
Union[str, Any] ) ->List[Any]: snake_case_ = '''こんにちは、世界。 \nこんばんは、世界。''' snake_case_ = '''こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。''' return input_text, output_text def snake_case__( self : Dict ) ->Union[str, Any]: pass # TODO add if relevant def snake_case__( self : Any ) ->Union[str, Any]: pass # TODO add if relevant def snake_case__( self : Tuple ) ->Tuple: pass # TODO add if relevant def snake_case__( self : List[Any] ) ->int: snake_case_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='''character''' ) snake_case_ = tokenizer.tokenize('''こんにちは、世界。 \nこんばんは、世界。''' ) self.assertListEqual( _UpperCamelCase , ['''こ''', '''ん''', '''に''', '''ち''', '''は''', '''、''', '''世''', '''界''', '''。''', '''こ''', '''ん''', '''ば''', '''ん''', '''は''', '''、''', '''世''', '''界''', '''。'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] ) def snake_case__( self : List[str] ) ->List[str]: snake_case_ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''こ''', '''ん''', '''に''', '''ち''', '''は''', '''ば''', '''世''', '''界''', '''、''', '''。'''] snake_case_ = {} for i, token in enumerate(_UpperCamelCase ): snake_case_ = i snake_case_ = CharacterTokenizer(vocab=_UpperCamelCase , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''こんにちは''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''は'''] ) self.assertListEqual(tokenizer.tokenize('''こんにちほ''' ) , ['''こ''', '''ん''', '''に''', '''ち''', '''[UNK]'''] ) def snake_case__( self : Dict ) ->Tuple: snake_case_ = self.tokenizer_class.from_pretrained('''cl-tohoku/bert-base-japanese-char''' ) snake_case_ = tokenizer.encode('''ありがとう。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.encode('''どういたしまして。''' , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase ) snake_case_ = tokenizer.build_inputs_with_special_tokens(_UpperCamelCase , _UpperCamelCase ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : str ) ->int: snake_case_ = '''cl-tohoku/bert-base-japanese''' snake_case_ = AutoTokenizer.from_pretrained(_UpperCamelCase ) self.assertIsInstance(_UpperCamelCase , _UpperCamelCase ) class snake_case_ ( unittest.TestCase ): '''simple docstring''' def snake_case__( self : Optional[int] ) ->Dict: snake_case_ = '''cl-tohoku/bert-base-japanese''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) ) snake_case_ = '''bert-base-cased''' with self.assertLogs('''transformers''' , level='''WARNING''' ) as cm: BertJapaneseTokenizer.from_pretrained(_UpperCamelCase ) self.assertTrue( cm.records[0].message.startswith( '''The tokenizer class you load from this checkpoint is not the same type as the class this function''' ''' is called from.''' ) )
"""simple docstring""" import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ = False, False, False @dataclass class lowercase : _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = None # Automatically constructed _SCREAMING_SNAKE_CASE = "dict" _SCREAMING_SNAKE_CASE = pa.struct({'bytes': pa.binary(), 'path': pa.string()} ) _SCREAMING_SNAKE_CASE = field(default='Audio' , init=_UpperCAmelCase , repr=_UpperCAmelCase ) def __call__( self ) -> Union[str, Any]: return self.pa_type def _snake_case ( self , lowercase ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("""To support encoding audio data, please install 'soundfile'.""" ) from err if isinstance(lowercase , lowercase ): return {"bytes": None, "path": value} elif isinstance(lowercase , lowercase ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes lowerCAmelCase = BytesIO() sf.write(lowercase , value["""array"""] , value["""sampling_rate"""] , format="""wav""" ) return {"bytes": buffer.getvalue(), "path": None} elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("""pcm""" ): # "PCM" only has raw audio bytes if value.get("""sampling_rate""" ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""" ) if value.get("""bytes""" ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) lowerCAmelCase = np.frombuffer(value["""bytes"""] , dtype=np.intaa ).astype(np.floataa ) / 32_767 else: lowerCAmelCase = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""" ).astype(np.floataa ) / 32_767 lowerCAmelCase = BytesIO(bytes() ) sf.write(lowercase , lowercase , value["""sampling_rate"""] , format="""wav""" ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("""path""" )} elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )} else: raise ValueError( f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' ) def _snake_case ( self , lowercase , lowercase = None ) -> dict: if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. Please use Audio(decode=True) instead.""" ) lowerCAmelCase , lowerCAmelCase = (value["""path"""], BytesIO(value["""bytes"""] )) if value["""bytes"""] is not None else (value["""path"""], None) if path is None and file is None: raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' 
) try: import librosa import soundfile as sf except ImportError as err: raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""" ) from err lowerCAmelCase = xsplitext(lowercase )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( """Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( """Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """ ) if file is None: lowerCAmelCase = token_per_repo_id or {} lowerCAmelCase = path.split("""::""" )[-1] try: lowerCAmelCase = string_to_dict(lowercase , config.HUB_DATASETS_URL )["""repo_id"""] lowerCAmelCase = token_per_repo_id[repo_id] except (ValueError, KeyError): lowerCAmelCase = None with xopen(lowercase , """rb""" , use_auth_token=lowercase ) as f: lowerCAmelCase , lowerCAmelCase = sf.read(lowercase ) else: lowerCAmelCase , lowerCAmelCase = sf.read(lowercase ) lowerCAmelCase = array.T if self.mono: lowerCAmelCase = librosa.to_mono(lowercase ) if self.sampling_rate and self.sampling_rate != sampling_rate: lowerCAmelCase = librosa.resample(lowercase , orig_sr=lowercase , target_sr=self.sampling_rate ) lowerCAmelCase = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def _snake_case ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError("""Cannot flatten a decoded Audio feature.""" ) return { "bytes": Value("""binary""" ), "path": Value("""string""" ), } def _snake_case ( self , lowercase ) -> pa.StructArray: if pa.types.is_string(storage.type ): lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() ) lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() ) lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("""array""" ): lowerCAmelCase = pa.array([Audio().encode_example(lowercase ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index("""bytes""" ) >= 0: lowerCAmelCase = storage.field("""bytes""" ) else: lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.binary() ) if storage.type.get_field_index("""path""" ) >= 0: lowerCAmelCase = storage.field("""path""" ) else: lowerCAmelCase = pa.array([None] * len(lowercase ) , type=pa.string() ) lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() ) return array_cast(lowercase , self.pa_type ) def _snake_case ( self , lowercase ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(lowercase ): with xopen(lowercase , """rb""" ) as f: lowerCAmelCase = f.read() return bytes_ lowerCAmelCase = pa.array( [ (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) lowerCAmelCase = pa.array( 
[os.path.basename(lowercase ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , ) lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() ) return array_cast(lowercase , self.pa_type )
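# Usage sketch for the Audio feature above (an addition, not part of the original
# file). Casting a dataset column to Audio makes indexing decode each sample to
# {"path", "array", "sampling_rate"}; the wav path below is a placeholder.
from datasets import Audio, Dataset

ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
sample = ds[0]["audio"]  # dict with the decoded numpy array and sampling rate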
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ): _UpperCamelCase : Dict = LDMTextToImagePipeline _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_PARAMS - { 'negative_prompt', 'negative_prompt_embeds', 'cross_attention_kwargs', 'prompt_embeds', } _UpperCamelCase : Optional[int] = PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'callback', 'callback_steps', } _UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS _UpperCamelCase : Optional[Any] = False def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple: """simple docstring""" torch.manual_seed(0 ) lowercase__ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) lowercase__ = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , ) torch.manual_seed(0 ) lowercase__ = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , ) torch.manual_seed(0 ) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) lowercase__ = CLIPTextModel(a ) lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowercase__ = { 'unet': unet, 'scheduler': scheduler, 'vqvae': vae, 'bert': text_encoder, 'tokenizer': tokenizer, } return components def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : Optional[int] , a : Optional[Any]=0 )-> Union[str, Any]: """simple docstring""" if str(a ).startswith('mps' ): lowercase__ = torch.manual_seed(a ) else: lowercase__ = torch.Generator(device=a ).manual_seed(a ) lowercase__ = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Any: """simple docstring""" lowercase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowercase__ = self.get_dummy_components() lowercase__ = LDMTextToImagePipeline(**a ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = self.get_dummy_inputs(a ) lowercase__ = pipe(**a ).images lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) lowercase__ = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def 
SCREAMING_SNAKE_CASE_ ( self : str , a : str , a : Any=torch.floataa , a : Any=0 )-> Any: """simple docstring""" lowercase__ = torch.manual_seed(a ) lowercase__ = np.random.RandomState(a ).standard_normal((1, 4, 32, 32) ) lowercase__ = torch.from_numpy(a ).to(device=a , dtype=a ) lowercase__ = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict: """simple docstring""" lowercase__ = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = self.get_inputs(a ) lowercase__ = pipe(**a ).images lowercase__ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) lowercase__ = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878] ) lowercase__ = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE (unittest.TestCase ): def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : Optional[int]=torch.floataa , a : List[Any]=0 )-> List[str]: """simple docstring""" lowercase__ = torch.manual_seed(a ) lowercase__ = np.random.RandomState(a ).standard_normal((1, 4, 32, 32) ) lowercase__ = torch.from_numpy(a ).to(device=a , dtype=a ) lowercase__ = { 'prompt': 'A painting of a squirrel eating a burger', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]: """simple docstring""" lowercase__ = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ = self.get_inputs(a ) lowercase__ = pipe(**a ).images[0] lowercase__ = load_numpy( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' ) lowercase__ = np.abs(expected_image - image ).max() assert max_diff < 1E-3
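# Minimal sketch of running the pipeline exercised by the tests above outside
# the test harness; the checkpoint name comes from the slow tests, the output
# filename is arbitrary.
import torch
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=50,
    guidance_scale=6.0,
).images[0]
image.save("squirrel.png")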
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowercase_ = logging.getLogger(__name__) @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} ) _UpperCamelCase : Optional[str] = field( default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} ) _UpperCamelCase : bool = field(default=UpperCAmelCase , metadata={'help': 'Set this flag to use fast tokenization.'} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) @dataclass class SCREAMING_SNAKE_CASE : _UpperCamelCase : str = field( metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} ) _UpperCamelCase : Optional[str] = field( default=UpperCAmelCase , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , ) _UpperCamelCase : int = field( default=1_28 , metadata={ 'help': ( 'The maximum total input sequence length after tokenization. Sequences longer ' 'than this will be truncated, sequences shorter will be padded.' ) } , ) _UpperCamelCase : bool = field( default=UpperCAmelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} ) def __UpperCamelCase () -> str: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ' --overwrite_output_dir to overcome.' 
) lowercase__ = import_module('tasks' ) try: lowercase__ = getattr(_SCREAMING_SNAKE_CASE , model_args.task_type ) lowercase__ = token_classification_task_clazz() except AttributeError: raise ValueError( F"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ F"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task lowercase__ = token_classification_task.get_labels(data_args.labels ) lowercase__ = dict(enumerate(_SCREAMING_SNAKE_CASE ) ) lowercase__ = len(_SCREAMING_SNAKE_CASE ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase__ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid={label: i for i, label in enumerate(_SCREAMING_SNAKE_CASE )} , cache_dir=model_args.cache_dir , ) lowercase__ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) lowercase__ = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # Get datasets lowercase__ = ( TokenClassificationDataset( token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowercase__ = ( TokenClassificationDataset( token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[List[int], List[int]]: lowercase__ = np.argmax(_SCREAMING_SNAKE_CASE , axis=2 ) lowercase__ , lowercase__ = preds.shape lowercase__ = [[] for _ in range(_SCREAMING_SNAKE_CASE )] lowercase__ = [[] for _ in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: 
out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(_SCREAMING_SNAKE_CASE ) -> Dict: lowercase__ , lowercase__ = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "precision": precision_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "recall": recall_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "f1": fa_score(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), } # Data collator lowercase__ = DataCollatorWithPadding(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowercase__ = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=_SCREAMING_SNAKE_CASE , eval_dataset=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase__ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) lowercase__ = trainer.evaluate() lowercase__ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_process_zero(): with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) writer.write('%s = %s\n' % (key, value) ) results.update(_SCREAMING_SNAKE_CASE ) # Predict if training_args.do_predict: lowercase__ = TokenClassificationDataset( token_classification_task=_SCREAMING_SNAKE_CASE , data_dir=data_args.data_dir , tokenizer=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) lowercase__ , lowercase__ , lowercase__ = trainer.predict(_SCREAMING_SNAKE_CASE ) lowercase__ , lowercase__ = align_predictions(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase__ = os.path.join(training_args.output_dir , 'test_results.txt' ) if trainer.is_world_process_zero(): with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer: for key, value in metrics.items(): logger.info(' %s = %s' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) writer.write('%s = %s\n' % (key, value) ) # Save predictions lowercase__ = os.path.join(training_args.output_dir , 'test_predictions.txt' ) if trainer.is_world_process_zero(): with open(_SCREAMING_SNAKE_CASE , 'w' ) as writer: with open(os.path.join(data_args.data_dir , 'test.txt' ) , 'r' ) as f: token_classification_task.write_predictions_to_file(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return results def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
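# A hypothetical invocation of the token-classification script above (the script
# file name and data paths are assumptions); the flags map onto the dataclass
# fields it parses via HfArgumentParser.
import subprocess

subprocess.run(
    [
        "python", "run_ner.py",
        "--model_name_or_path", "bert-base-cased",
        "--task_type", "NER",
        "--data_dir", "./conll2003",            # CoNLL-2003-formatted .txt files
        "--labels", "./conll2003/labels.txt",
        "--output_dir", "./ner-output",
        "--max_seq_length", "128",
        "--do_train",
        "--do_eval",
    ],
    check=True,
)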
"""simple docstring""" def UpperCAmelCase__ ( lowerCAmelCase__ :float ) -> float: '''simple docstring''' return 1_0 - x * x def UpperCAmelCase__ ( lowerCAmelCase__ :float , lowerCAmelCase__ :float ) -> float: '''simple docstring''' if equation(lowerCAmelCase__ ) * equation(lowerCAmelCase__ ) >= 0: raise ValueError("""Wrong space!""" ) lowercase = a while (b - a) >= 0.01: # Find middle point lowercase = (a + b) / 2 # Check if middle point is root if equation(lowerCAmelCase__ ) == 0.0: break # Decide the side to repeat the steps if equation(lowerCAmelCase__ ) * equation(lowerCAmelCase__ ) < 0: lowercase = c else: lowercase = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : List[Any] =logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] ={ """s-JoL/Open-Llama-V1""": """https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json""", } class _A ( lowerCAmelCase ): snake_case__ : Union[str, Any] = 'open-llama' def __init__( self , __lowerCAmelCase=10_0000 , __lowerCAmelCase=4096 , __lowerCAmelCase=1_1008 , __lowerCAmelCase=32 , __lowerCAmelCase=32 , __lowerCAmelCase="silu" , __lowerCAmelCase=2048 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-6 , __lowerCAmelCase=True , __lowerCAmelCase=0 , __lowerCAmelCase=1 , __lowerCAmelCase=2 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ): """simple docstring""" lowercase = vocab_size lowercase = max_position_embeddings lowercase = hidden_size lowercase = intermediate_size lowercase = num_hidden_layers lowercase = num_attention_heads lowercase = hidden_act lowercase = initializer_range lowercase = rms_norm_eps lowercase = use_cache lowercase = kwargs.pop( """use_memorry_efficient_attention""" , __lowerCAmelCase ) lowercase = hidden_dropout_prob lowercase = attention_dropout_prob lowercase = use_stable_embedding lowercase = shared_input_output_embedding lowercase = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , tie_word_embeddings=__lowerCAmelCase , **__lowerCAmelCase , ) def A__ ( self ): """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __lowerCAmelCase ) or len(self.rope_scaling ) != 2: raise ValueError( """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """ f'got {self.rope_scaling}' ) lowercase = self.rope_scaling.get("""type""" , __lowerCAmelCase ) lowercase = self.rope_scaling.get("""factor""" , __lowerCAmelCase ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}' ) if rope_scaling_factor is None or not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or rope_scaling_factor <= 1.0: raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}' )
def is_subset_sum(arr: list[int], required_sum: int) -> bool:
    """Classic subset-sum DP: can some subset of ``arr`` add up to ``required_sum``?"""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
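# Quick check of the DP above (an addition): 9 = 4 + 5 is reachable, while 30
# falls in the gap between "without 34" (max 26) and "with 34" (min 34).
print(is_subset_sum([3, 34, 4, 12, 5, 2], 9))   # True
print(is_subset_sum([3, 34, 4, 12, 5, 2], 30))  # False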
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( lowerCAmelCase): _a = (DDIMParallelScheduler,) _a = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def SCREAMING_SNAKE_CASE ( self: Any , **_lowerCAmelCase: Optional[Any] ): lowercase :List[Any] = { "num_train_timesteps": 10_00, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", "clip_sample": True, } config.update(**_lowerCAmelCase ) return config def SCREAMING_SNAKE_CASE ( self: str , **_lowerCAmelCase: Any ): lowercase :Optional[int] = self.scheduler_classes[0] lowercase :Dict = self.get_scheduler_config(**_lowerCAmelCase ) lowercase :List[str] = scheduler_class(**_lowerCAmelCase ) lowercase , lowercase :str = 10, 0.0 lowercase :List[Any] = self.dummy_model() lowercase :int = self.dummy_sample_deter scheduler.set_timesteps(_lowerCAmelCase ) for t in scheduler.timesteps: lowercase :Optional[int] = model(_lowerCAmelCase , _lowerCAmelCase ) lowercase :Dict = scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample return sample def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: int ): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=_lowerCAmelCase ) lowercase :Optional[Any] = self.scheduler_classes[0] lowercase :List[str] = self.get_scheduler_config(steps_offset=1 ) lowercase :Optional[int] = scheduler_class(**_lowerCAmelCase ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) ) def SCREAMING_SNAKE_CASE ( self: Tuple ): for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: int ): for clip_sample in [True, False]: self.check_over_configs(clip_sample=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ): for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: Optional[int] ): for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: Dict ): self.check_over_configs(thresholding=_lowerCAmelCase ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , ) def SCREAMING_SNAKE_CASE ( self: str ): for t in [1, 10, 49]: self.check_over_forward(time_step=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: int ): for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ): self.check_over_forward(time_step=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: str ): for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=_lowerCAmelCase , eta=_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: str ): lowercase :Dict = 
self.scheduler_classes[0] lowercase :Tuple = self.get_scheduler_config() lowercase :Optional[Any] = scheduler_class(**_lowerCAmelCase ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_47_71 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_24_60 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_09_79 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1e-5 def SCREAMING_SNAKE_CASE ( self: List[str] ): lowercase :Union[str, Any] = self.scheduler_classes[0] lowercase :Union[str, Any] = self.get_scheduler_config() lowercase :Union[str, Any] = scheduler_class(**_lowerCAmelCase ) lowercase , lowercase :Union[str, Any] = 10, 0.0 scheduler.set_timesteps(_lowerCAmelCase ) lowercase :Dict = self.dummy_model() lowercase :Dict = self.dummy_sample_deter lowercase :Union[str, Any] = self.dummy_sample_deter + 0.1 lowercase :int = self.dummy_sample_deter - 0.1 lowercase :Dict = samplea.shape[0] lowercase :Tuple = torch.stack([samplea, samplea, samplea] , dim=0 ) lowercase :Optional[Any] = torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase ) lowercase :Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) lowercase :Optional[int] = scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCAmelCase ) lowercase :int = torch.sum(torch.abs(_lowerCAmelCase ) ) lowercase :Optional[int] = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2 assert abs(result_mean.item() - 0.49_82 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :int = self.full_loop() lowercase :Optional[int] = torch.sum(torch.abs(_lowerCAmelCase ) ) lowercase :Any = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2 assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self: Any ): lowercase :Dict = self.full_loop(prediction_type="v_prediction" ) lowercase :int = torch.sum(torch.abs(_lowerCAmelCase ) ) lowercase :Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 52.53_02 ) < 1e-2 assert abs(result_mean.item() - 0.06_84 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self: Optional[int] ): # We specify different beta, so that the first alpha is 0.99 lowercase :List[Any] = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 ) lowercase :List[Any] = torch.sum(torch.abs(_lowerCAmelCase ) ) lowercase :Union[str, Any] = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2 assert abs(result_mean.item() - 0.19_51 ) < 1e-3 def SCREAMING_SNAKE_CASE ( self: Any ): # We specify different beta, so that the first alpha is 0.99 lowercase :Tuple = self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 ) lowercase :str = torch.sum(torch.abs(_lowerCAmelCase ) ) lowercase :List[str] = torch.mean(torch.abs(_lowerCAmelCase ) ) assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2 assert abs(result_mean.item() - 0.19_41 ) < 1e-3
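# The quantity probed by the _get_variance asserts above is the standard DDIM
# posterior variance. A stand-alone sketch (not the scheduler's internal helper):
#   var(t, t_prev) = (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)
# where a_* are cumulative alpha products.
import torch


def ddim_variance(alphas_cumprod: torch.Tensor, t: int, prev_t: int) -> torch.Tensor:
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[prev_t] if prev_t >= 0 else torch.tensor(1.0)
    return (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)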
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' with open(lowerCAmelCase_ ) as metadata_file: __SCREAMING_SNAKE_CASE = json.load(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = LukeConfig(use_entity_aware_attention=lowerCAmelCase_ , **metadata["model_config"] ) # Load in the weights from the checkpoint_path __SCREAMING_SNAKE_CASE = torch.load(lowerCAmelCase_ , map_location="cpu" ) # Load the entity vocab file __SCREAMING_SNAKE_CASE = load_entity_vocab(lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] ) # Add special tokens to the token vocabulary for downstream tasks __SCREAMING_SNAKE_CASE = AddedToken("<ent>" , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = AddedToken("<ent2>" , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" ) tokenizer.save_pretrained(lowerCAmelCase_ ) with open(os.path.join(lowerCAmelCase_ , LukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) __SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(lowerCAmelCase_ ) # Initialize the embeddings of the special tokens __SCREAMING_SNAKE_CASE = state_dict["embeddings.word_embeddings.weight"] __SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["@"] )[0]].unsqueeze(0 ) __SCREAMING_SNAKE_CASE = word_emb[tokenizer.convert_tokens_to_ids(["#"] )[0]].unsqueeze(0 ) __SCREAMING_SNAKE_CASE = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: __SCREAMING_SNAKE_CASE = f"""encoder.layer.{layer_index}.attention.self.""" __SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] __SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] __SCREAMING_SNAKE_CASE = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks __SCREAMING_SNAKE_CASE = state_dict["entity_embeddings.entity_embeddings.weight"] __SCREAMING_SNAKE_CASE = entity_emb[entity_vocab["[MASK]"]] __SCREAMING_SNAKE_CASE = LukeModel(config=lowerCAmelCase_ ).eval() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) if not (len(lowerCAmelCase_ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(f"""Missing keys {', '.join(lowerCAmelCase_ )}. 
Expected only missing embeddings.position_ids""" ) if not (all(key.startswith("entity_predictions" ) or key.startswith("lm_head" ) for key in unexpected_keys )): raise ValueError( "Unexpected keys" f""" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions' ) or key.startswith('lm_head' ))] )}""" ) # Check outputs __SCREAMING_SNAKE_CASE = LukeTokenizer.from_pretrained(lowerCAmelCase_ , task="entity_classification" ) __SCREAMING_SNAKE_CASE = ( "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the" " new world number one avoid a humiliating second- round exit at Wimbledon ." ) __SCREAMING_SNAKE_CASE = (39, 42) __SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , entity_spans=[span] , add_prefix_space=lowerCAmelCase_ , return_tensors="pt" ) __SCREAMING_SNAKE_CASE = model(**lowerCAmelCase_ ) # Verify word hidden states if model_size == "large": __SCREAMING_SNAKE_CASE = torch.Size((1, 42, 1024) ) __SCREAMING_SNAKE_CASE = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base __SCREAMING_SNAKE_CASE = torch.Size((1, 42, 768) ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": __SCREAMING_SNAKE_CASE = torch.Size((1, 1, 1024) ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base __SCREAMING_SNAKE_CASE = torch.Size((1, 1, 768) ) __SCREAMING_SNAKE_CASE = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is""" f""" {expected_shape}""" ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCAmelCase_ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print("Saving PyTorch model to {}".format(lowerCAmelCase_ ) ) model.save_pretrained(lowerCAmelCase_ ) def UpperCAmelCase__ (lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = {} with open(lowerCAmelCase_ , "r" , encoding="utf-8" ) as f: for index, line in enumerate(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = line.rstrip().split("\t" ) __SCREAMING_SNAKE_CASE = index return entity_vocab if __name__ == "__main__": a__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''') parser.add_argument( '''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.''' ) parser.add_argument( '''--entity_vocab_path''', default=None, type=str, help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.''' ) parser.add_argument( '''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.''' ) a__ : int = parser.parse_args() 
convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
"""simple docstring""" class A_ : """simple docstring""" def __init__( self :List[Any] , lowercase_ :int ) -> None: UpperCAmelCase = size UpperCAmelCase = [0] * size UpperCAmelCase = [0] * size @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return index | (index + 1) @staticmethod def UpperCAmelCase__ ( lowercase_ :int ) -> int: return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self :Any , lowercase_ :int , lowercase_ :int ) -> None: UpperCAmelCase = value while index < self.size: UpperCAmelCase = self.get_prev(lowercase_ ) + 1 if current_left_border == index: UpperCAmelCase = value else: UpperCAmelCase = max(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = self.get_next(lowercase_ ) def UpperCAmelCase__ ( self :List[str] , lowercase_ :int , lowercase_ :int ) -> int: right -= 1 # Because of right is exclusive UpperCAmelCase = 0 while left <= right: UpperCAmelCase = self.get_prev(lowercase_ ) if left <= current_left: UpperCAmelCase = max(lowercase_ , self.tree[right] ) UpperCAmelCase = current_left else: UpperCAmelCase = max(lowercase_ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin a_ : Tuple = False @skip_mps class __UpperCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase : Union[str, Any] =StableDiffusionAttendAndExcitePipeline lowercase : Tuple =False lowercase : Optional[Any] =TEXT_TO_IMAGE_PARAMS lowercase : Dict =TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} ) lowercase : Optional[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS lowercase : Optional[Any] =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def lowercase__ ( cls ): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(lowerCAmelCase ) @classmethod def lowercase__ ( cls ): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" torch.manual_seed(0 ) lowerCamelCase_ =UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase, ) lowerCamelCase_ =DDIMScheduler( beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''', clip_sample=lowerCAmelCase, set_alpha_to_one=lowerCAmelCase, ) torch.manual_seed(0 ) lowerCamelCase_ =AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, sample_size=128, ) torch.manual_seed(0 ) lowerCamelCase_ =CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act='''gelu''', projection_dim=512, ) lowerCamelCase_ =CLIPTextModel(lowerCAmelCase ) lowerCamelCase_ =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowerCamelCase_ ={ '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=0 ): """simple docstring""" if str(lowerCAmelCase ).startswith('''mps''' ): lowerCamelCase_ =torch.manual_seed(lowerCAmelCase ) else: lowerCamelCase_ =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) lowerCamelCase_ =lowerCamelCase_ ={ '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ ='''cpu''' lowerCamelCase_ =self.get_dummy_components() lowerCamelCase_ 
=self.pipeline_class(**lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCamelCase_ =self.get_dummy_inputs(lowerCAmelCase ) lowerCamelCase_ =pipe(**lowerCAmelCase ).images lowerCamelCase_ =image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 64, 64, 3) ) lowerCamelCase_ =np.array( [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] ) lowerCamelCase_ =np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase, 1e-3 ) def lowercase__ ( self ): """simple docstring""" super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def lowercase__ ( self ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def lowercase__ ( self ): """simple docstring""" self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4 ) def lowercase__ ( self ): """simple docstring""" super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def lowercase__ ( self ): """simple docstring""" super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def lowercase__ ( self ): """simple docstring""" super().test_save_load_local(expected_max_difference=5e-4 ) def lowercase__ ( self ): """simple docstring""" super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class __UpperCamelCase ( unittest.TestCase ): @classmethod def lowercase__ ( cls ): """simple docstring""" super().setUpClass() torch.use_deterministic_algorithms(lowerCAmelCase ) @classmethod def lowercase__ ( cls ): """simple docstring""" super().tearDownClass() torch.use_deterministic_algorithms(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase__ ( self ): """simple docstring""" lowerCamelCase_ =torch.manual_seed(51 ) lowerCamelCase_ =StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa ) pipe.to('''cuda''' ) lowerCamelCase_ ='''a painting of an elephant with glasses''' lowerCamelCase_ =[5, 7] lowerCamelCase_ =pipe( prompt=lowerCAmelCase, token_indices=lowerCAmelCase, guidance_scale=7.5, generator=lowerCAmelCase, num_inference_steps=5, max_iter_to_alter=5, output_type='''numpy''', ).images[0] lowerCamelCase_ =load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5e-1
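# Minimal sketch of the pipeline under test outside the harness. token_indices
# selects the prompt tokens ("cat", "frog") whose cross-attention the sampler
# iteratively excites; prompt and indices follow the dummy inputs above.
import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="a cat and a frog",
    token_indices=[2, 5],
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("cat_and_frog.png")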
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count perimeters <= limit formed by exactly one
    integer-sided right triangle, enumerating primitive triples with
    Euclid's formula and stepping through their multiples."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
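# Euclid's formula sanity check (an addition): m = 2, n = 1 generates the
# (3, 4, 5) triangle, whose perimeter 12 is the smallest value the counter
# above sees exactly once.
m, n = 2, 1
a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
assert (a, b, c) == (3, 4, 5) and a * a + b * b == c * c
print(2 * m * (m + n))  # perimeter 12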
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { 'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json', 'BridgeTower/bridgetower-base-itm-mlm': ( 'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json' ), } class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ): _a = """bridgetower_vision_model""" def __init__( self , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=3 , lowerCAmelCase=16 , lowerCAmelCase=288 , lowerCAmelCase=1 , lowerCAmelCase=1e-05 , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=False , **lowerCAmelCase , ) -> Dict: '''simple docstring''' super().__init__(**lowerCAmelCase ) _lowercase =hidden_size _lowercase =num_hidden_layers _lowercase =num_channels _lowercase =patch_size _lowercase =image_size _lowercase =initializer_factor _lowercase =layer_norm_eps _lowercase =stop_gradient _lowercase =share_layernorm _lowercase =remove_last_layer @classmethod def A__ ( cls , lowerCAmelCase , **lowerCAmelCase ) -> "PretrainedConfig": '''simple docstring''' _lowercase , _lowercase =cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase ) if config_dict.get('model_type' ) == "bridgetower": _lowercase =config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase , **lowerCAmelCase ) class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ): _a = """bridgetower_text_model""" def __init__( self , lowerCAmelCase=50_265 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=1 , lowerCAmelCase=3_072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=514 , lowerCAmelCase=1 , lowerCAmelCase=1e-05 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , **lowerCAmelCase , ) -> Any: '''simple docstring''' super().__init__(**lowerCAmelCase ) _lowercase =vocab_size _lowercase =hidden_size _lowercase =num_hidden_layers _lowercase =num_attention_heads _lowercase =hidden_act _lowercase =initializer_factor _lowercase =intermediate_size _lowercase =hidden_dropout_prob _lowercase =attention_probs_dropout_prob _lowercase =max_position_embeddings _lowercase =type_vocab_size _lowercase =layer_norm_eps _lowercase =position_embedding_type _lowercase =use_cache _lowercase =pad_token_id _lowercase =bos_token_id _lowercase =eos_token_id @classmethod def A__ ( cls , lowerCAmelCase , **lowerCAmelCase ) -> "PretrainedConfig": '''simple docstring''' _lowercase , _lowercase =cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase ) if config_dict.get('model_type' ) == "bridgetower": _lowercase =config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(lowerCAmelCase , **lowerCAmelCase ) class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ): _a = """bridgetower""" def __init__( self , lowerCAmelCase=True , lowerCAmelCase="gelu" , lowerCAmelCase=768 , lowerCAmelCase=1 , lowerCAmelCase=1e-05 , lowerCAmelCase=False , lowerCAmelCase="add" , lowerCAmelCase=12 , lowerCAmelCase=6 , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=None , lowerCAmelCase=None , **lowerCAmelCase , ) -> Optional[Any]: '''simple docstring''' _lowercase =kwargs.pop('text_config_dict' , lowerCAmelCase ) _lowercase =kwargs.pop('vision_config_dict' , lowerCAmelCase ) super().__init__(**lowerCAmelCase ) _lowercase =share_cross_modal_transformer_layers _lowercase =hidden_act _lowercase =hidden_size _lowercase =initializer_factor _lowercase =layer_norm_eps _lowercase =share_link_tower_layers _lowercase =link_tower_type _lowercase =num_attention_heads _lowercase =num_hidden_layers _lowercase =tie_word_embeddings _lowercase =init_layernorm_from_vision_encoder if text_config is None: _lowercase ={} logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' ) if vision_config is None: _lowercase ={} logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' ) _lowercase =BridgeTowerTextConfig(**lowerCAmelCase ) _lowercase =BridgeTowerVisionConfig(**lowerCAmelCase ) @classmethod def A__ ( cls , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase ) -> Optional[Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowerCAmelCase ) def A__ ( self ) -> Optional[Any]: '''simple docstring''' _lowercase =copy.deepcopy(self.__dict__ ) _lowercase =self.text_config.to_dict() _lowercase =self.vision_config.to_dict() _lowercase =self.__class__.model_type return output
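# Sketch of composing the nested configs above. The class and classmethod names
# follow upstream transformers (BridgeTower); treat them as assumptions here.
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_cfg = BridgeTowerTextConfig(hidden_size=768)
vision_cfg = BridgeTowerVisionConfig(image_size=288)
cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(cfg.text_config.hidden_size)  # 768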
import argparse
import math
import traceback

import dateutil.parser as date_parser
import requests


def extract_time_from_single_job(job):
    """Extract started_at / completed_at / duration (minutes) from one job."""
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info


def get_job_time(workflow_run_id, token=None):
    """Collect time info for every job in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f"{k}: {v['duration']}")
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case_ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case_ : Optional[int] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n' def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=8 ): _UpperCamelCase : Optional[int] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 _UpperCamelCase : List[Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class lowercase__ ( lowercase ): def __init__( self : List[str] ,lowerCamelCase__ : UNetaDConditionModel ,lowerCamelCase__ : DDPMScheduler ,lowerCamelCase__ : VQModel ,): '''simple docstring''' super().__init__() self.register_modules( unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ,movq=lowerCamelCase__ ,) _UpperCamelCase : str = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Dict ): '''simple docstring''' if latents is None: _UpperCamelCase : Any = randn_tensor(lowerCamelCase__ ,generator=lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=lowerCamelCase__ ) else: if latents.shape != shape: raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' ) _UpperCamelCase : Tuple = latents.to(lowerCamelCase__ ) _UpperCamelCase : Dict = latents * scheduler.init_noise_sigma return latents def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : str=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) _UpperCamelCase : str = torch.device(F'cuda:{gpu_id}' ) _UpperCamelCase : Dict = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowerCamelCase__ ,lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : int=0 ): '''simple docstring''' if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' 
) _UpperCamelCase : List[Any] = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to('cpu' ,silence_dtype_warnings=lowerCamelCase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) _UpperCamelCase : Optional[int] = None for cpu_offloaded_model in [self.unet, self.movq]: _UpperCamelCase : int = cpu_offload_with_hook(lowerCamelCase__ ,lowerCamelCase__ ,prev_module_hook=lowerCamelCase__ ) # We'll offload the last model manually. _UpperCamelCase : Dict = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCamelCase_ ( self : Tuple ): '''simple docstring''' if not hasattr(self.unet ,'_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(lowerCamelCase__ ,'_hf_hook' ) and hasattr(module._hf_hook ,'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowerCamelCase__ ) def __call__( self : int ,lowerCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,lowerCamelCase__ : int = 512 ,lowerCamelCase__ : int = 512 ,lowerCamelCase__ : int = 100 ,lowerCamelCase__ : float = 4.0 ,lowerCamelCase__ : int = 1 ,lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase__ : Optional[torch.FloatTensor] = None ,lowerCamelCase__ : Optional[str] = "pil" ,lowerCamelCase__ : bool = True ,): '''simple docstring''' _UpperCamelCase : Any = self._execution_device _UpperCamelCase : List[str] = guidance_scale > 1.0 if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : Dict = torch.cat(lowerCamelCase__ ,dim=0 ) _UpperCamelCase : List[Any] = image_embeds.shape[0] * num_images_per_prompt if isinstance(lowerCamelCase__ ,lowerCamelCase__ ): _UpperCamelCase : str = torch.cat(lowerCamelCase__ ,dim=0 ) if do_classifier_free_guidance: _UpperCamelCase : Union[str, Any] = image_embeds.repeat_interleave(lowerCamelCase__ ,dim=0 ) _UpperCamelCase : int = negative_image_embeds.repeat_interleave(lowerCamelCase__ ,dim=0 ) _UpperCamelCase : Dict = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase__ ) self.scheduler.set_timesteps(lowerCamelCase__ ,device=lowerCamelCase__ ) _UpperCamelCase : Tuple = self.scheduler.timesteps _UpperCamelCase : Optional[Any] = self.unet.config.in_channels _UpperCamelCase : List[Any] = downscale_height_and_width(lowerCamelCase__ ,lowerCamelCase__ ,self.movq_scale_factor ) # create initial latent _UpperCamelCase : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) ,image_embeds.dtype ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,self.scheduler ,) for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ): # expand the latents if we are doing classifier free guidance _UpperCamelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCamelCase : List[str] = {'image_embeds': image_embeds} _UpperCamelCase : Optional[Any] = self.unet( sample=lowerCamelCase__ ,timestep=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,added_cond_kwargs=lowerCamelCase__ ,return_dict=lowerCamelCase__ ,)[0] if do_classifier_free_guidance: _UpperCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 ) _UpperCamelCase : Union[str, Any] = 
noise_pred.chunk(2 ) _UpperCamelCase : int = variance_pred.chunk(2 ) _UpperCamelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) _UpperCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): _UpperCamelCase : str = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 _UpperCamelCase : Tuple = self.scheduler.step( lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,generator=lowerCamelCase__ ,)[0] # post-processing _UpperCamelCase : int = self.movq.decode(lowerCamelCase__ ,force_not_quantize=lowerCamelCase__ )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: _UpperCamelCase : List[str] = image * 0.5 + 0.5 _UpperCamelCase : List[Any] = image.clamp(0 ,1 ) _UpperCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": _UpperCamelCase : List[str] = self.numpy_to_pil(lowerCamelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCamelCase__ )
351
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)

if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        # convert to BetterTransformer, generate, convert back, then check that
        # a save/reload round-trip produces identical generations
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # saving a model while it is still in BetterTransformer form must fail
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
236
0
import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration UpperCAmelCase_ = [ # tf -> hf ("""/""", """."""), ("""layer_""", """layers."""), ("""kernel""", """weight"""), ("""beta""", """bias"""), ("""gamma""", """weight"""), ("""pegasus""", """model"""), ] UpperCAmelCase_ = [ (""".output.dense""", """.fc2"""), ("""intermediate.LayerNorm""", """final_layer_norm"""), ("""intermediate.dense""", """fc1"""), ] UpperCAmelCase_ = ( INIT_COMMON + [ ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.out_proj"""), ("""attention.self""", """self_attn"""), ("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""), ("""attention.encdec_output.dense""", """encoder_attn.out_proj"""), ("""attention.encdec""", """encoder_attn"""), ("""key""", """k_proj"""), ("""value""", """v_proj"""), ("""query""", """q_proj"""), ("""decoder.LayerNorm""", """decoder.layernorm_embedding"""), ] + END_COMMON ) UpperCAmelCase_ = ( INIT_COMMON + [ ("""embeddings.word_embeddings""", """shared.weight"""), ("""embeddings.position_embeddings""", """embed_positions.weight"""), ("""attention.self.LayerNorm""", """self_attn_layer_norm"""), ("""attention.output.dense""", """self_attn.output"""), ("""attention.self""", """self_attn.self"""), ("""encoder.LayerNorm""", """encoder.layernorm_embedding"""), ] + END_COMMON ) UpperCAmelCase_ = [ """encdec/key/bias""", """encdec/query/bias""", """encdec/value/bias""", """self/key/bias""", """self/query/bias""", """self/value/bias""", """encdec_output/dense/bias""", """attention/output/dense/bias""", ] def lowerCAmelCase_ ( __UpperCAmelCase: Any , __UpperCAmelCase: Dict ) -> Any: for tf_name, hf_name in patterns: UpperCamelCase__ : str = k.replace(lowerCAmelCase__ , lowerCAmelCase__ ) return k def lowerCAmelCase_ ( __UpperCAmelCase: dict , __UpperCAmelCase: dict ) -> BigBirdPegasusForConditionalGeneration: UpperCamelCase__ : str = BigBirdPegasusConfig(**lowerCAmelCase__ ) UpperCamelCase__ : List[str] = BigBirdPegasusForConditionalGeneration(lowerCAmelCase__ ) UpperCamelCase__ : str = torch_model.state_dict() UpperCamelCase__ : str = {} # separating decoder weights UpperCamelCase__ : Any = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} UpperCamelCase__ : Optional[Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ): UpperCamelCase__ : Optional[Any] = [k.endswith(lowerCAmelCase__ ) for ending in KEYS_TO_IGNORE] if any(lowerCAmelCase__ ): continue UpperCamelCase__ : Optional[Any] = DECODER_PATTERNS UpperCamelCase__ : Union[str, Any] = rename_state_dict_key(lowerCAmelCase__ , lowerCAmelCase__ ) if new_k not in state_dict: raise ValueError(f"could not find new key {new_k} in state dict. 
(converted from {k})" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): UpperCamelCase__ : Optional[Any] = v.T UpperCamelCase__ : str = torch.from_numpy(lowerCAmelCase__ ) assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}" for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ): UpperCamelCase__ : str = [k.endswith(lowerCAmelCase__ ) for ending in KEYS_TO_IGNORE] if any(lowerCAmelCase__ ): continue UpperCamelCase__ : Optional[int] = REMAINING_PATTERNS UpperCamelCase__ : Dict = rename_state_dict_key(lowerCAmelCase__ , lowerCAmelCase__ ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})" ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): UpperCamelCase__ : Optional[int] = v.T UpperCamelCase__ : Dict = torch.from_numpy(lowerCAmelCase__ ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}" UpperCamelCase__ : Dict = mapping['model.embed_positions.weight'] UpperCamelCase__ : Optional[int] = mapping.pop('''model.embed_positions.weight''' ) UpperCamelCase__ : Dict = torch_model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ ) UpperCamelCase__ : List[Any] = [ k for k in missing if k not in [ 'final_logits_bias', 'model.encoder.embed_tokens.weight', 'model.decoder.embed_tokens.weight', 'lm_head.weight', ] ] assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}" assert extra == [], f"no matches found for the following tf keys {extra}" return torch_model def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] ) -> Dict: UpperCamelCase__ : Any = tf.train.list_variables(lowerCAmelCase__ ) UpperCamelCase__ : int = {} UpperCamelCase__ : Optional[int] = ['global_step'] for name, shape in tqdm(lowerCAmelCase__ , desc='''converting tf checkpoint to dict''' ): UpperCamelCase__ : Any = any(pat in name for pat in ignore_name ) if skip_key: continue UpperCamelCase__ : Union[str, Any] = tf.train.load_variable(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCamelCase__ : Any = array return tf_weights def lowerCAmelCase_ ( __UpperCAmelCase: str , __UpperCAmelCase: str , __UpperCAmelCase: dict ) -> List[Any]: UpperCamelCase__ : List[str] = get_tf_weights_as_numpy(lowerCAmelCase__ ) UpperCamelCase__ : Dict = convert_bigbird_pegasus(lowerCAmelCase__ , lowerCAmelCase__ ) torch_model.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') UpperCAmelCase_ = parser.parse_args() UpperCAmelCase_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
201
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowercase__ : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase__ ( lowercase_, unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = XLMRobertaTokenizer _SCREAMING_SNAKE_CASE = XLMRobertaTokenizerFast _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True def SCREAMING_SNAKE_CASE__ ( self : List[str] ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase_ : Any = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] ): lowerCAmelCase_ : Any = '<pad>' lowerCAmelCase_ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowerCAmelCase_ : int = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_0_0_2 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_2 ) def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): lowerCAmelCase_ : int = XLMRobertaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize('This is a test' ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) lowerCAmelCase_ : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowerCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) lowerCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) def SCREAMING_SNAKE_CASE__ ( self : int ): if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return lowerCAmelCase_ : List[str] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-xlm-roberta', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : str = tempfile.mkdtemp() lowerCAmelCase_ : int = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[int] = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) lowerCAmelCase_ : Optional[Any] = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f ) self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way lowerCAmelCase_ : int = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[int] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=True lowerCAmelCase_ : str = tempfile.mkdtemp() lowerCAmelCase_ : List[str] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : str = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it save with the same files self.assertSequenceEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Checks everything loads correctly in the same way lowerCAmelCase_ : str = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Union[str, Any] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python 
for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) # Save tokenizer rust, legacy_format=False lowerCAmelCase_ : int = tempfile.mkdtemp() lowerCAmelCase_ : Optional[Any] = tokenizer_r.save_pretrained(SCREAMING_SNAKE_CASE_ , legacy_format=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : int = tokenizer_p.save_pretrained(SCREAMING_SNAKE_CASE_ ) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way lowerCAmelCase_ : List[str] = tokenizer_r.from_pretrained(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Optional[int] = tokenizer_p.from_pretrained(SCREAMING_SNAKE_CASE_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) shutil.rmtree(SCREAMING_SNAKE_CASE_ ) @cached_property def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): return XLMRobertaTokenizer.from_pretrained('xlm-roberta-base' ) def SCREAMING_SNAKE_CASE__ ( self : int ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(SCREAMING_SNAKE_CASE_ , f.name ) lowerCAmelCase_ : Tuple = XLMRobertaTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Dict = pickle.dumps(SCREAMING_SNAKE_CASE_ ) pickle.loads(SCREAMING_SNAKE_CASE_ ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): if not self.test_rust_tokenizer: return lowerCAmelCase_ : Union[str, Any] = self.get_tokenizer() lowerCAmelCase_ : Dict = self.get_rust_tokenizer() lowerCAmelCase_ : Tuple = 'I was born in 92000, and this is falsé.' lowerCAmelCase_ : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Dict = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : List[str] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : int = self.get_rust_tokenizer() lowerCAmelCase_ : Any = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) lowerCAmelCase_ : List[Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ): lowerCAmelCase_ : Any = 'Hello World!' lowerCAmelCase_ : Union[str, Any] = [0, 3_5_3_7_8, 6_6_6_1, 3_8, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): lowerCAmelCase_ : Tuple = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) lowerCAmelCase_ : int = [ 0, 3_2_9_3, 8_3, 1_0, 4_5_5_2, 4_9_8_9, 7_9_8_6, 6_7_8, 1_0, 5_9_1_5, 1_1_1, 1_7_9_4_5_9, 1_2_4_8_5_0, 4, 6_0_4_4, 2_3_7, 1_2, 6, 5, 6, 4, 6_7_8_0, 7_0_5, 1_5, 1_3_8_8, 4_4, 3_7_8, 1_0_1_1_4, 7_1_1, 1_5_2, 2_0, 6, 5, 2_2_3_7_6, 6_4_2, 1_2_2_1, 1_5_1_9_0, 3_4_1_5_3, 4_5_0, 5_6_0_8, 9_5_9, 1_1_1_9, 5_7_7_0_2, 1_3_6, 1_8_6, 4_7, 1_0_9_8, 2_9_3_6_7, 4_7, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 6_0_4_4, 2_3_7, 6_2_8_4, 5_0_9_0_1, 5_2_8, 3_1, 9_0, 3_4, 9_2_7, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def SCREAMING_SNAKE_CASE__ ( self : int ): # fmt: off lowerCAmelCase_ : List[str] = {'input_ids': [[0, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [0, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name='xlm-roberta-base' , revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3' , )
224
0
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products ending here; a
        # negative number swaps the roles of the running min and max
        number = numbers[i]
        if number < 0:
            min_till_now, max_till_now = max_till_now, min_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
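# Hedged usage sketch for the function above (expected values verified by hand):
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
    print(max_product_subarray([-2, 0, -1]))  # 0, any product through -1 is negative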
371
def reverse_long_words(sentence: str) -> str:
    """Reverse every word in the sentence that is longer than four characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
231
0
"""simple docstring""" from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker __UpperCamelCase = '''CompVis/stable-diffusion-v1-1''' __UpperCamelCase = '''CompVis/stable-diffusion-v1-2''' __UpperCamelCase = '''CompVis/stable-diffusion-v1-3''' __UpperCamelCase = '''CompVis/stable-diffusion-v1-4''' class lowerCAmelCase ( lowerCamelCase_ ): '''simple docstring''' def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ) -> Optional[int]: super()._init_() SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = StableDiffusionPipeline( vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , requires_safety_checker=lowerCAmelCase__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def __A ( self ) -> Dict[str, Any]: return {k: getattr(self , lowerCAmelCase__ ) for k in self.config.keys() if not k.startswith('_' )} def __A ( self , lowerCAmelCase__ = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase__ ) def __A ( self ) -> Dict: self.enable_attention_slicing(lowerCAmelCase__ ) @torch.no_grad() def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , **lowerCAmelCase__ , ) -> Optional[Any]: return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , **lowerCAmelCase__ , ) -> Any: return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , 
width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , **lowerCAmelCase__ , ) -> Optional[int]: return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , **lowerCAmelCase__ , ) -> int: return self.pipea( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) @torch.no_grad() def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 512 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = 7.5 , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 1 , **lowerCAmelCase__ , ) -> List[Any]: SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu' self.to(lowerCAmelCase__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F'`height` and `width` must be divisible by 8 but are {height} and {width}.' 
) # Get first result from Stable Diffusion Checkpoint v1.1 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 SCREAMING_SNAKE_CASE = self.textaimg_sda_a( prompt=lowerCAmelCase__ , height=lowerCAmelCase__ , width=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ , guidance_scale=lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ , num_images_per_prompt=lowerCAmelCase__ , eta=lowerCAmelCase__ , generator=lowerCAmelCase__ , latents=lowerCAmelCase__ , output_type=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=lowerCAmelCase__ , **lowerCAmelCase__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
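# Hedged usage sketch (the pipeline class name is assumed; it is obfuscated above):
#
#   pipe = StableDiffusionComparisonPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   result = pipe("an astronaut riding a horse", height=512, width=512)
#   # result.images holds one image per checkpoint, v1.1 through v1.4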
113
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase ( datasets.BeamBasedBuilder ): '''simple docstring''' def __A ( self ) -> Dict: return datasets.DatasetInfo( features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=lowerCAmelCase__ , ) def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str: return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )] def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase__ ) class lowerCAmelCase ( datasets.BeamBasedBuilder ): '''simple docstring''' def __A ( self ) -> int: return datasets.DatasetInfo( features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=lowerCAmelCase__ , ) def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict: return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} ) ] def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase__ ) def lowercase () -> str: return [(i, {"content": content}) for i, content in enumerate(['foo', 'bar', 'foobar'] )] def lowercase () -> Optional[Any]: return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['foo', 'bar', 'foobar'] )] class lowerCAmelCase ( lowerCamelCase_ ): '''simple docstring''' @require_beam def __A ( self ) -> Optional[int]: SCREAMING_SNAKE_CASE = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) SCREAMING_SNAKE_CASE = builder.as_dataset() self.assertEqual(dset['train'].num_rows , lowerCAmelCase__ ) self.assertEqual(dset['train'].info.splits['train'].num_examples , lowerCAmelCase__ ) self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def __A ( self ) -> int: import apache_beam as beam SCREAMING_SNAKE_CASE = beam.io.parquetio.WriteToParquet SCREAMING_SNAKE_CASE = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner='DirectRunner' ) with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock: SCREAMING_SNAKE_CASE = partial(lowerCAmelCase__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , 
F'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) ) SCREAMING_SNAKE_CASE = builder.as_dataset() self.assertEqual(dset['train'].num_rows , lowerCAmelCase__ ) self.assertEqual(dset['train'].info.splits['train'].num_examples , lowerCAmelCase__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) ) self.assertTrue( os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset @require_beam def __A ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = DummyBeamDataset(cache_dir=lowerCAmelCase__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __A ( self ) -> int: SCREAMING_SNAKE_CASE = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: SCREAMING_SNAKE_CASE = NestedBeamDataset(cache_dir=lowerCAmelCase__ , beam_runner='DirectRunner' ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) ) SCREAMING_SNAKE_CASE = builder.as_dataset() self.assertEqual(dset['train'].num_rows , lowerCAmelCase__ ) self.assertEqual(dset['train'].info.splits['train'].num_examples , lowerCAmelCase__ ) self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(lowerCAmelCase__ , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) ) del dset
113
1
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
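# Hedged usage check for the solver above: with power=2 there is exactly one way
# to write 13 as a sum of distinct squares (4 + 9) and no way to write 7.
if __name__ == "__main__":
    print(solve(13, 2))  # 1
    print(solve(7, 2))  # 0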
354
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return the indices of the two entries of sorted `nums` that sum to `target`."""
    i = 0
    j = len(nums) - 1

    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1

    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
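# Hedged companion sketch (not part of the original file): the two-pointer scan
# above relies on `nums` being sorted. For unsorted input, a one-pass hash map
# finds the same pair in O(n) time without sorting first.
def two_sum_hashmap(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}  # value -> index
    for i, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], i]
        seen[value] = i
    return []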
288
0
import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"

    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
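# Hedged usage note: packaged with accelerate, the parser above backs the
# `accelerate env` subcommand, e.g. `accelerate env --config_file my_config.yaml`
# (the flag is defined in this file; the yaml path is a hypothetical example).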
59
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class _lowerCAmelCase ( lowercase ): """simple docstring""" __UpperCAmelCase : Tuple = "openai/whisper-base" __UpperCAmelCase : Union[str, Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __UpperCAmelCase : List[str] = "transcriber" __UpperCAmelCase : Optional[Any] = WhisperProcessor __UpperCAmelCase : str = WhisperForConditionalGeneration __UpperCAmelCase : List[str] = ["audio"] __UpperCAmelCase : Tuple = ["text"] def _lowercase ( self : str, UpperCAmelCase__ : int ): return self.pre_processor(UpperCAmelCase__, return_tensors="pt" ).input_features def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ): return self.model.generate(inputs=UpperCAmelCase__ ) def _lowercase ( self : Dict, UpperCAmelCase__ : Optional[int] ): return self.pre_processor.batch_decode(UpperCAmelCase__, skip_special_tokens=UpperCAmelCase__ )[0]
17
0
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def A_ ( _lowerCAmelCase : Tuple ): """simple docstring""" _a = os.path.join(args.tf_model_dir, '''parameters.json''' ) _a = json.loads(open(_lowerCAmelCase ).read() ) if not params: raise ValueError( f'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith('''.pt''' ): _a = args.output + '''.pt''' _a = OrderedDict() with tf.device('''/CPU:0''' ): _a = tf.train.load_checkpoint(args.tf_model_dir ) _a = reader.get_variable_to_shape_map() for key_name in shapes.keys(): _a = reader.get_tensor(_lowerCAmelCase ).astype(np.floataa ) if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ): continue if key_name.startswith('''pasts/''' ): if key_name.startswith('''pasts/mlp''' ): _a = int(key_name[9] ) elif key_name.startswith('''pasts/out''' ): _a = 8 _a = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time _a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a = torch.tensor(_lowerCAmelCase ) elif key_name.startswith('''model/moe''' ): _a = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/switch_gating/kernel''' ): _a = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player _a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/softmlp/kernel''' ): _a = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player _a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ): _a = key_name[-9:-7] for i in range(16 ): _a = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer) _a = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided _a = torch.tensor(_lowerCAmelCase ) elif key_name.startswith('''model/mlp''' ): _a = int(key_name[9:].split('''/''' )[0] ) if key_name.endswith('''/p1/kernel''' ): _a = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player _a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/p1/bias''' ): _a = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player _a = vnp.copy() # same because it is one dimensional _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/p2/kernel''' ): _a = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player _a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/p2/bias''' ): _a = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player _a = vnp.copy() # same because it is one dimensional _a = torch.tensor(_lowerCAmelCase ) elif key_name.startswith('''model/ln''' ): _a = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _a = '''model.blocks.%d.feed_forward.norm.bias''' % player _a = vnp.copy() # same because it is one dimensional _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/g''' ): _a = '''model.blocks.%d.feed_forward.norm.weight''' % player _a = vnp.copy() # same because it is one dimensional _a = torch.tensor(_lowerCAmelCase ) elif key_name.startswith('''model/att''' ): _a = int(key_name[9:].split('''/''' )[0] ) 
if key_name.endswith('''/qkv/kernel''' ): _a = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum _a = state[:, 0, :, :] _a = state[:, 1, :, :] _a = state[:, 2, :, :] _a = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _a = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _a = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix _a = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player _a = torch.tensor(_lowerCAmelCase ) _a = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player _a = torch.tensor(_lowerCAmelCase ) _a = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/o/kernel''' ): _a = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player _a = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix _a = torch.tensor(_lowerCAmelCase ) elif key_name.startswith('''model/an''' ): _a = int(key_name[8:].split('''/''' )[0] ) if key_name.endswith('''/b''' ): _a = '''model.blocks.%d.self_attn.norm.bias''' % player _a = vnp.copy() # same because it is one dimensional _a = torch.tensor(_lowerCAmelCase ) elif key_name.endswith('''/g''' ): _a = '''model.blocks.%d.self_attn.norm.weight''' % player _a = vnp.copy() # same because it is one dimensional _a = torch.tensor(_lowerCAmelCase ) elif ( key_name.startswith('''model/wte''' ) or key_name.startswith('''model/wpe''' ) or key_name.startswith('''model/ete''' ) ): _a = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[ key_name[-3:] ] _a = '''model.%s.weight''' % nlayer _a = vnp.copy() # same in embedded _a = torch.tensor(_lowerCAmelCase ) if key_name.startswith('''model/wte''' ): _a = '''lm_head.weight''' _a = vnp.copy() # same in embedded _a = torch.tensor(_lowerCAmelCase ) elif key_name.startswith('''model/wob''' ): _a = '''final_logits_bias''' _a = vnp.copy() # same in embedded _a = state.reshape((1, -1) ) _a = torch.tensor(_lowerCAmelCase ) elif key_name == "model/dense/kernel": _a = '''model.last_project.weight''' _a = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix _a = torch.tensor(_lowerCAmelCase ) elif key_name == "model/dense_1/bias": _a = '''model.last_project.bias''' _a = vnp.copy() # same because it is one dimensional _a = torch.tensor(_lowerCAmelCase ) torch.save(_lowerCAmelCase, args.output ) if __name__ == "__main__": __snake_case = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') __snake_case = parser.parse_args() convert_tf_gptsan_to_pt(args)
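# Hedged note on the repeated `.transpose([1, 0])` calls above: TensorFlow dense
# kernels are stored as (in_features, out_features) while torch.nn.Linear keeps
# (out_features, in_features), so every 2-D kernel is transposed on conversion.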
153
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __snake_case = { '''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ResNetForImageClassification''', '''ResNetModel''', '''ResNetPreTrainedModel''', '''ResNetBackbone''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFResNetForImageClassification''', '''TFResNetModel''', '''TFResNetPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''FlaxResNetForImageClassification''', '''FlaxResNetModel''', '''FlaxResNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
153
1
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A_ (a_ , a_ , a_ , unittest.TestCase ): UpperCAmelCase__ = StableDiffusionInpaintPipeline UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS UpperCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS UpperCAmelCase__ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCAmelCase__ = frozenset([] ) def _lowercase ( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=_A , ) UpperCAmelCase = PNDMScheduler(skip_prk_steps=_A ) torch.manual_seed(0 ) UpperCAmelCase = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) UpperCAmelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) UpperCAmelCase = CLIPTextModel(_A ) UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def _lowercase ( self , _A , _A=0 ): '''simple docstring''' UpperCAmelCase = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_A ) ).to(_A ) UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((6_4, 6_4) ) UpperCAmelCase = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((6_4, 6_4) ) if str(_A ).startswith('''mps''' ): UpperCAmelCase = torch.manual_seed(_A ) else: UpperCAmelCase = torch.Generator(device=_A ).manual_seed(_A ) UpperCAmelCase = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': init_image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase = self.get_dummy_components() UpperCAmelCase = StableDiffusionInpaintPipeline(**_A ) UpperCAmelCase = sd_pipe.to(_A ) 
sd_pipe.set_progress_bar_config(disable=_A ) UpperCAmelCase = self.get_dummy_inputs(_A ) UpperCAmelCase = sd_pipe(**_A ).images UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) UpperCAmelCase = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _lowercase ( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench.npy''' ) UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting''' UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench''' UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = pipe( prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , ) UpperCAmelCase = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9E-3 def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint''' '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' ) UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting''' UpperCAmelCase = StableDiffusionInpaintPipeline.from_pretrained( _A , torch_dtype=torch.floataa , safety_checker=_A , ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing() UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench''' UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = pipe( prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , ) UpperCAmelCase = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5E-1 def _lowercase ( self ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/sd2-inpaint/init_image.png''' ) UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' ) UpperCAmelCase = '''stabilityai/stable-diffusion-2-inpainting''' UpperCAmelCase = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' ) UpperCAmelCase = 
StableDiffusionInpaintPipeline.from_pretrained( _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase = '''Face of a yellow cat, high resolution, sitting on a park bench''' UpperCAmelCase = torch.manual_seed(0 ) UpperCAmelCase = pipe( prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) UpperCAmelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 1_0**9
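The assertions in the test file above all use the same regression fingerprint: slice the bottom-right 3x3 corner of the decoded image and compare it against stored reference values under a max-abs tolerance. A minimal self-contained sketch of that pattern, with a dummy numpy array standing in for a real pipeline output and placeholder reference values:

import numpy as np

image = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)   # stand-in for pipe(**inputs).images

image_slice = image[0, -3:, -3:, -1]                     # bottom-right 3x3 of the last channel
expected_slice = np.full(9, 0.5, dtype=np.float32)       # placeholder reference values

assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2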
273
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , ) assert hasattr(self , '''env''' ) def _lowercase ( self , _A=1 ): '''simple docstring''' return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , ) def _lowercase ( self , _A ): '''simple docstring''' TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
273
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
363
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to another base (2..36) as a string."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
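A quick ad-hoc round-trip check of the converter above; int(s, base) is the exact inverse for every base the function supports:

assert decimal_to_any(0, 2) == "0"
assert decimal_to_any(255, 16) == "FF"
for n in (1, 36, 1000):
    assert int(decimal_to_any(n, 36), 36) == n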
49
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _a : Optional[int] = logging.get_logger(__name__) _a : Tuple = { 'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Union[str, Any] = "poolformer" def __init__( self , a__=3 , a__=16 , a__=16 , a__=3 , a__=4.0 , a__=[2, 2, 6, 2] , a__=[64, 128, 320, 512] , a__=[7, 3, 3, 3] , a__=[4, 2, 2, 2] , a__=[2, 1, 1, 1] , a__=4 , a__=0.0 , a__="gelu" , a__=True , a__=1e-5 , a__=0.0_2 , **a__ , ): _lowerCAmelCase : List[Any] = num_channels _lowerCAmelCase : str = patch_size _lowerCAmelCase : Dict = stride _lowerCAmelCase : Optional[int] = padding _lowerCAmelCase : Optional[int] = pool_size _lowerCAmelCase : Dict = hidden_sizes _lowerCAmelCase : Optional[int] = mlp_ratio _lowerCAmelCase : Optional[int] = depths _lowerCAmelCase : Dict = patch_sizes _lowerCAmelCase : Tuple = strides _lowerCAmelCase : Any = num_encoder_blocks _lowerCAmelCase : Any = drop_path_rate _lowerCAmelCase : Tuple = hidden_act _lowerCAmelCase : Optional[Any] = use_layer_scale _lowerCAmelCase : List[str] = layer_scale_init_value _lowerCAmelCase : List[Any] = initializer_range super().__init__(**a__ ) class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Optional[Any] = version.parse("1.11" ) @property def __A ( self ): return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __A ( self ): return 2e-3
44
"""simple docstring""" import numpy as np import qiskit def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str: _lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. _lowerCAmelCase : Tuple = 6 * key_len # Measurement basis for Alice's qubits. _lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase ) # The set of states Alice will prepare. _lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase ) # Measurement basis for Bob's qubits. _lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase ) # Quantum Circuit to simulate BB84 _lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(_lowerCamelCase ): if alice_state[index] == 1: bbaa_circ.x(_lowerCamelCase ) if alice_basis[index] == 1: bbaa_circ.h(_lowerCamelCase ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(_lowerCamelCase ): if bob_basis[index] == 1: bbaa_circ.h(_lowerCamelCase ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. _lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. _lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase ) # Returns the result of measurement. _lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. _lowerCAmelCase : str = """""".join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( _lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. _lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" ) return key if __name__ == "__main__": print(F"""The generated key is : {bbaa(8, seed=0)}""") from doctest import testmod testmod()
44
1
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
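A quick demo of the cycle check above on two small hand-built adjacency dicts:

acyclic = {0: [1, 2], 1: [2], 2: []}
cyclic = {0: [1], 1: [2], 2: [0]}
assert check_cycle(acyclic) is False
assert check_cycle(cyclic) is True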
93
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
93
1
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)  # integer halving keeps n an int
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
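The demo at the bottom of the file leans on Fermat's little theorem: for prime p and b not divisible by p, b**(p-2) mod p is the modular inverse of b, which is what makes "(a/b) mod p" well defined. A direct check:

p, b = 701, 10
inv_b = binary_exponentiation(b, p - 2, p)   # b^(p-2) mod p
assert (b * inv_b) % p == 1                  # it really is the inverse
assert inv_b == pow(b, p - 2, p)             # agrees with Python's built-in pow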
71
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16, in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
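A usage sketch for the converter above; the scratch filename is hypothetical, and the call overwrites it in place:

import torch

sd = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
torch.save(sd, "tiny_state_dict.bin")        # hypothetical scratch path
convert("tiny_state_dict.bin")               # halves every tensor, overwrites in place
halved = torch.load("tiny_state_dict.bin")
assert all(v.dtype == torch.float16 for v in halved.values())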
262
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) A__ : Any = { 'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'], 'processing_layoutlmv2': ['LayoutLMv2Processor'], 'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = ['LayoutLMv2TokenizerFast'] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = ['LayoutLMv2FeatureExtractor'] A__ : Dict = ['LayoutLMv2ImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ 'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv2ForQuestionAnswering', 'LayoutLMv2ForSequenceClassification', 'LayoutLMv2ForTokenClassification', 'LayoutLMv2Layer', 'LayoutLMv2Model', 'LayoutLMv2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys A__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
353
"""simple docstring""" import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def _snake_case ( lowerCamelCase__ : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : int ) -> List[Any]: if isinstance(lowerCamelCase__ , torch.Tensor ): return image elif isinstance(lowerCamelCase__ , PIL.Image.Image ): lowerCamelCase_ : int =[image] if isinstance(image[0] , PIL.Image.Image ): lowerCamelCase_ : Tuple =[np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image] lowerCamelCase_ : Any =np.concatenate(lowerCamelCase__ , axis=0 ) lowerCamelCase_ : Any =np.array(lowerCamelCase__ ).astype(np.floataa ) / 255.0 lowerCamelCase_ : Any =image.transpose(0 , 3 , 1 , 2 ) lowerCamelCase_ : Dict =2.0 * image - 1.0 lowerCamelCase_ : List[Any] =torch.from_numpy(lowerCamelCase__ ) elif isinstance(image[0] , torch.Tensor ): lowerCamelCase_ : Tuple =torch.cat(lowerCamelCase__ , dim=0 ) return image def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[int]=0.9995 ) -> List[str]: if not isinstance(lowerCamelCase__ , np.ndarray ): lowerCamelCase_ : List[Any] =True lowerCamelCase_ : Tuple =va.device lowerCamelCase_ : Optional[int] =va.cpu().numpy() lowerCamelCase_ : List[Any] =va.cpu().numpy() lowerCamelCase_ : Any =np.sum(va * va / (np.linalg.norm(lowerCamelCase__ ) * np.linalg.norm(lowerCamelCase__ )) ) if np.abs(lowerCamelCase__ ) > DOT_THRESHOLD: lowerCamelCase_ : str =(1 - t) * va + t * va else: lowerCamelCase_ : Tuple =np.arccos(lowerCamelCase__ ) lowerCamelCase_ : Any =np.sin(lowerCamelCase__ ) lowerCamelCase_ : Optional[int] =theta_a * t lowerCamelCase_ : Tuple =np.sin(lowerCamelCase__ ) lowerCamelCase_ : List[str] =np.sin(theta_a - theta_t ) / sin_theta_a lowerCamelCase_ : Union[str, Any] =sin_theta_t / sin_theta_a lowerCamelCase_ : str =sa * va + sa * va if inputs_are_torch: lowerCamelCase_ : Any =torch.from_numpy(lowerCamelCase__ ).to(lowerCamelCase__ ) return va def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict ) -> List[str]: lowerCamelCase_ : List[Any] =F.normalize(lowerCamelCase__ , dim=-1 ) lowerCamelCase_ : Any =F.normalize(lowerCamelCase__ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def _snake_case ( lowerCamelCase__ : str , lowerCamelCase__ : List[Any] ) -> Any: for param in model.parameters(): lowerCamelCase_ : Any =value class lowercase__ ( snake_case__ ): def __init__( self : Union[str, Any] , snake_case__ : AutoencoderKL , snake_case__ : CLIPTextModel , snake_case__ : CLIPModel , snake_case__ : CLIPTokenizer , snake_case__ : UNetaDConditionModel , snake_case__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , snake_case__ : CLIPFeatureExtractor , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : str=None , ): super().__init__() self.register_modules( vae=snake_case__ , text_encoder=snake_case__ , clip_model=snake_case__ , 
tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , feature_extractor=snake_case__ , coca_model=snake_case__ , coca_tokenizer=snake_case__ , coca_transform=snake_case__ , ) lowerCamelCase_ : Optional[Any] =( feature_extractor.size if isinstance(feature_extractor.size , snake_case__ ) else feature_extractor.size["shortest_edge"] ) lowerCamelCase_ : Union[str, Any] =transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , snake_case__ ) set_requires_grad(self.clip_model , snake_case__ ) def UpperCAmelCase__ ( self : Optional[Any] , snake_case__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCamelCase_ : List[Any] =self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(snake_case__ ) def UpperCAmelCase__ ( self : int ): self.enable_attention_slicing(snake_case__ ) def UpperCAmelCase__ ( self : str ): set_requires_grad(self.vae , snake_case__ ) def UpperCAmelCase__ ( self : List[str] ): set_requires_grad(self.vae , snake_case__ ) def UpperCAmelCase__ ( self : List[Any] ): set_requires_grad(self.unet , snake_case__ ) def UpperCAmelCase__ ( self : Tuple ): set_requires_grad(self.unet , snake_case__ ) def UpperCAmelCase__ ( self : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : Tuple ): # get the original timestep using init_timestep lowerCamelCase_ : Optional[int] =min(int(num_inference_steps * strength ) , snake_case__ ) lowerCamelCase_ : Dict =max(num_inference_steps - init_timestep , 0 ) lowerCamelCase_ : Union[str, Any] =self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : List[Any]=None ): if not isinstance(snake_case__ , torch.Tensor ): raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(snake_case__ )}""" ) lowerCamelCase_ : List[str] =image.to(device=snake_case__ , dtype=snake_case__ ) if isinstance(snake_case__ , snake_case__ ): lowerCamelCase_ : Optional[int] =[ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(snake_case__ ) ] lowerCamelCase_ : Tuple =torch.cat(snake_case__ , dim=0 ) else: lowerCamelCase_ : Any =self.vae.encode(snake_case__ ).latent_dist.sample(snake_case__ ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase_ : Optional[Any] =0.18_215 * init_latents lowerCamelCase_ : Optional[Any] =init_latents.repeat_interleave(snake_case__ , dim=0 ) lowerCamelCase_ : List[Any] =randn_tensor(init_latents.shape , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ ) # get latents lowerCamelCase_ : Optional[int] =self.scheduler.add_noise(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase_ : int =init_latents return latents def UpperCAmelCase__ ( self : Dict , snake_case__ : List[Any] ): lowerCamelCase_ : Optional[int] =self.coca_transform(snake_case__ ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): lowerCamelCase_ : Tuple =self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) lowerCamelCase_ : str =self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split("<end_of_text>" 
)[0].replace("<start_of_text>" , "" ).rstrip(" .," ) def UpperCAmelCase__ ( self : str , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] ): lowerCamelCase_ : int =self.feature_extractor.preprocess(snake_case__ ) lowerCamelCase_ : Tuple =torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half() lowerCamelCase_ : Optional[int] =self.clip_model.get_image_features(snake_case__ ) lowerCamelCase_ : List[str] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case__ ) lowerCamelCase_ : str =image_embeddings_clip.repeat_interleave(snake_case__ , dim=0 ) return image_embeddings_clip @torch.enable_grad() def UpperCAmelCase__ ( self : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : str , ): lowerCamelCase_ : Any =latents.detach().requires_grad_() lowerCamelCase_ : Union[str, Any] =self.scheduler.scale_model_input(snake_case__ , snake_case__ ) # predict the noise residual lowerCamelCase_ : Any =self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): lowerCamelCase_ : Any =self.scheduler.alphas_cumprod[timestep] lowerCamelCase_ : str =1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf lowerCamelCase_ : Dict =(latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 lowerCamelCase_ : Tuple =torch.sqrt(snake_case__ ) lowerCamelCase_ : Optional[int] =pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , snake_case__ ): lowerCamelCase_ : Optional[Any] =self.scheduler.sigmas[index] lowerCamelCase_ : Optional[int] =latents - sigma * noise_pred else: raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase_ : str =1 / 0.18_215 * sample lowerCamelCase_ : List[Any] =self.vae.decode(snake_case__ ).sample lowerCamelCase_ : List[Any] =(image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase_ : Any =transforms.Resize(self.feature_extractor_size )(snake_case__ ) lowerCamelCase_ : Any =self.normalize(snake_case__ ).to(latents.dtype ) lowerCamelCase_ : Any =self.clip_model.get_image_features(snake_case__ ) lowerCamelCase_ : Optional[int] =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=snake_case__ ) lowerCamelCase_ : Union[str, Any] =spherical_dist_loss(snake_case__ , snake_case__ ).mean() * clip_guidance_scale lowerCamelCase_ : Union[str, Any] =-torch.autograd.grad(snake_case__ , snake_case__ )[0] if isinstance(self.scheduler , snake_case__ ): lowerCamelCase_ : int =latents.detach() + grads * (sigma**2) lowerCamelCase_ : Tuple =noise_pred_original else: lowerCamelCase_ : Union[str, Any] =noise_pred_original - torch.sqrt(snake_case__ ) * grads return noise_pred, latents @torch.no_grad() def __call__( self : Any , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image] , snake_case__ : Union[torch.FloatTensor, PIL.Image.Image] , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[int] = 512 , snake_case__ : Optional[int] = 512 , snake_case__ : float = 0.6 , snake_case__ : Optional[int] = 50 , snake_case__ : Optional[float] = 7.5 , snake_case__ : 
Optional[int] = 1 , snake_case__ : float = 0.0 , snake_case__ : Optional[float] = 100 , snake_case__ : Optional[torch.Generator] = None , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , snake_case__ : float = 0.8 , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , ): if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) != batch_size: raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(snake_case__ )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(snake_case__ , torch.Generator ) and batch_size > 1: lowerCamelCase_ : List[str] =[generator] + [None] * (batch_size - 1) lowerCamelCase_ : Any =[ ("model", self.coca_model is None), ("tokenizer", self.coca_tokenizer is None), ("transform", self.coca_transform is None), ] lowerCamelCase_ : Optional[Any] =[x[0] for x in coca_is_none if x[1]] lowerCamelCase_ : Any =", ".join(snake_case__ ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(snake_case__ ): raise ValueError( F"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) lowerCamelCase_ : Union[str, Any] =self.get_image_description(snake_case__ ) if style_prompt is None: if len(snake_case__ ): raise ValueError( F"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) lowerCamelCase_ : str =self.get_image_description(snake_case__ ) # get prompt text embeddings for content and style lowerCamelCase_ : str =self.tokenizer( snake_case__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , ) lowerCamelCase_ : List[str] =self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] lowerCamelCase_ : int =self.tokenizer( snake_case__ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=snake_case__ , return_tensors="pt" , ) lowerCamelCase_ : Optional[int] =self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] lowerCamelCase_ : Dict =slerp(snake_case__ , snake_case__ , snake_case__ ) # duplicate text embeddings for each generation per prompt lowerCamelCase_ : str =text_embeddings.repeat_interleave(snake_case__ , dim=0 ) # set timesteps lowerCamelCase_ : List[Any] ="offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) lowerCamelCase_ : List[Any] ={} if accepts_offset: lowerCamelCase_ : Optional[int] =1 self.scheduler.set_timesteps(snake_case__ , **snake_case__ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) lowerCamelCase_ , lowerCamelCase_ : Optional[int] =self.get_timesteps(snake_case__ , snake_case__ , self.device ) lowerCamelCase_ : Union[str, Any] =timesteps[:1].repeat(snake_case__ ) # Preprocess image lowerCamelCase_ : str =preprocess(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase_ : int =self.prepare_latents( snake_case__ , snake_case__ , snake_case__ , text_embeddings.dtype , self.device , snake_case__ ) lowerCamelCase_ : Dict =preprocess(snake_case__ , snake_case__ , snake_case__ ) lowerCamelCase_ : Tuple =self.prepare_latents( snake_case__ , snake_case__ , snake_case__ , text_embeddings.dtype , self.device , snake_case__ ) 
lowerCamelCase_ : int =slerp(snake_case__ , snake_case__ , snake_case__ ) if clip_guidance_scale > 0: lowerCamelCase_ : List[Any] =self.get_clip_image_embeddings(snake_case__ , snake_case__ ) lowerCamelCase_ : Optional[Any] =self.get_clip_image_embeddings(snake_case__ , snake_case__ ) lowerCamelCase_ : List[Any] =slerp( snake_case__ , snake_case__ , snake_case__ ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowerCamelCase_ : str =guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowerCamelCase_ : List[Any] =content_text_input.input_ids.shape[-1] lowerCamelCase_ : Union[str, Any] =self.tokenizer([""] , padding="max_length" , max_length=snake_case__ , return_tensors="pt" ) lowerCamelCase_ : List[Any] =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt lowerCamelCase_ : List[str] =uncond_embeddings.repeat_interleave(snake_case__ , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCamelCase_ : List[str] =torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. lowerCamelCase_ : Optional[Any] =(batch_size, self.unet.config.in_channels, height // 8, width // 8) lowerCamelCase_ : List[str] =text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps lowerCamelCase_ : Union[str, Any] =torch.randn(snake_case__ , generator=snake_case__ , device="cpu" , dtype=snake_case__ ).to( self.device ) else: lowerCamelCase_ : Tuple =torch.randn(snake_case__ , generator=snake_case__ , device=self.device , dtype=snake_case__ ) else: if latents.shape != latents_shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) lowerCamelCase_ : Any =latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCamelCase_ : Optional[int] =latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCamelCase_ : List[Any] ="eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCamelCase_ : Dict ={} if accepts_eta: lowerCamelCase_ : Tuple =eta # check if the scheduler accepts generator lowerCamelCase_ : Tuple ="generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: lowerCamelCase_ : Optional[int] =generator with self.progress_bar(total=snake_case__ ): for i, t in enumerate(snake_case__ ): # expand the latents if we are doing classifier free guidance lowerCamelCase_ : List[str] =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCamelCase_ : Optional[Any] =self.scheduler.scale_model_input(snake_case__ , snake_case__ ) # predict the noise residual lowerCamelCase_ : Tuple =self.unet(snake_case__ , snake_case__ , encoder_hidden_states=snake_case__ ).sample # perform classifier free guidance if do_classifier_free_guidance: lowerCamelCase_ , lowerCamelCase_ : Dict =noise_pred.chunk(2 ) lowerCamelCase_ : Optional[Any] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: lowerCamelCase_ : Optional[int] =( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] =self.cond_fn( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , ) # compute the previous noisy sample x_t -> x_t-1 lowerCamelCase_ : Dict =self.scheduler.step(snake_case__ , snake_case__ , snake_case__ , **snake_case__ ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor lowerCamelCase_ : str =1 / 0.18_215 * latents lowerCamelCase_ : List[str] =self.vae.decode(snake_case__ ).sample lowerCamelCase_ : str =(image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase_ : List[Any] =image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase_ : List[str] =self.numpy_to_pil(snake_case__ ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=snake_case__ , nsfw_content_detected=snake_case__ )
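The content/style blending in this pipeline relies on the spherical interpolation ("slerp") helper defined near the top of the file. A standalone, numpy-only re-statement of that interpolation, including the near-parallel fallback to plain lerp, for readers who want to poke at it outside the pipeline:

import numpy as np

def slerp_np(t: float, v0: np.ndarray, v1: np.ndarray, dot_threshold: float = 0.9995) -> np.ndarray:
    # Cosine of the angle between the (normalized) vectors.
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > dot_threshold:          # nearly parallel: fall back to lerp
        return (1 - t) * v0 + t * v1
    theta_0 = np.arccos(dot)
    theta_t = theta_0 * t
    s0 = np.sin(theta_0 - theta_t) / np.sin(theta_0)
    s1 = np.sin(theta_t) / np.sin(theta_0)
    return s0 * v0 + s1 * v1

a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
mid = slerp_np(0.5, a, b)
assert np.allclose(np.linalg.norm(mid), 1.0)  # interpolant stays on the unit circle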
209
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
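The derived hidden_size above encodes Swin's channel doubling at each downsampling stage; the arithmetic in miniature:

embed_dim, depths = 96, [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768                    # 96 -> 192 -> 384 -> 768 over three stage transitions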
38
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
151
0
import random import unittest import numpy as np import torch from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class __snake_case ( _lowercase , unittest.TestCase): # TODO: is there an appropriate internal test set? snake_case__ : List[str] = "ssube/stable-diffusion-x4-upscaler-onnx" def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : int=0 ): """simple docstring""" _lowerCamelCase : Tuple = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(__lowerCAmelCase ) ) _lowerCamelCase : Union[str, Any] = torch.manual_seed(__lowerCAmelCase ) _lowerCamelCase : Tuple = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.get_dummy_inputs() _lowerCamelCase : Any = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] ) assert np.abs(image_slice - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" _lowerCamelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = self.get_dummy_inputs() _lowerCamelCase : Optional[Any] = pipe(**__lowerCAmelCase ).images _lowerCamelCase : str = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : Optional[int] = np.array( [0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Tuple = self.get_dummy_inputs() _lowerCamelCase : str = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array( [0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] 
) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = self.get_dummy_inputs() _lowerCamelCase : Tuple = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : Union[str, Any] = np.array( [0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' ) _lowerCamelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = self.get_dummy_inputs() _lowerCamelCase : List[Any] = pipe(**__lowerCAmelCase ).images _lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : Optional[int] = np.array( [0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 @nightly @require_onnxruntime @require_torch_gpu class __snake_case ( unittest.TestCase): @property def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" _lowerCamelCase : Optional[int] = ort.SessionOptions() _lowerCamelCase : List[str] = False return options def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" _lowerCamelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _lowerCamelCase : Any = init_image.resize((1_2_8, 1_2_8) ) # using the PNDM scheduler by default _lowerCamelCase : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : int = '''A fantasy landscape, trending on artstation''' _lowerCamelCase : List[Any] = torch.manual_seed(0 ) _lowerCamelCase : List[str] = pipe( prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__lowerCAmelCase , output_type='''np''' , ) _lowerCamelCase : List[Any] = output.images _lowerCamelCase : List[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def SCREAMING_SNAKE_CASE ( self : str ): 
"""simple docstring""" _lowerCamelCase : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/img2img/sketch-mountains-input.jpg''' ) _lowerCamelCase : int = init_image.resize((1_2_8, 1_2_8) ) _lowerCamelCase : str = LMSDiscreteScheduler.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' ) _lowerCamelCase : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained( '''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__lowerCAmelCase ) _lowerCamelCase : Optional[int] = '''A fantasy landscape, trending on artstation''' _lowerCamelCase : int = torch.manual_seed(0 ) _lowerCamelCase : List[str] = pipe( prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__lowerCAmelCase , output_type='''np''' , ) _lowerCamelCase : Union[str, Any] = output.images _lowerCamelCase : Optional[Any] = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) _lowerCamelCase : str = np.array( [0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
371
"""simple docstring""" import math def snake_case_ ( A_ : int ): '''simple docstring''' return math.sqrt(A_ ) * math.sqrt(A_ ) == num def snake_case_ ( A_ : int ): '''simple docstring''' _lowerCamelCase : Dict = 0 _lowerCamelCase : Tuple = n while left <= right: _lowerCamelCase : List[str] = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: _lowerCamelCase : int = mid - 1 else: _lowerCamelCase : Optional[Any] = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
175
0
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: _UpperCAmelCase : Any = None _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : Tuple = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} _UpperCAmelCase : Any = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), }, "tokenizer_file": { "google/bigbird-roberta-base": ( "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json" ), "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json" ), }, } _UpperCAmelCase : Any = { "google/bigbird-roberta-base": 4_096, "google/bigbird-roberta-large": 4_096, "google/bigbird-base-trivia-itc": 4_096, } _UpperCAmelCase : Tuple = "▁" class lowercase ( _SCREAMING_SNAKE_CASE ): __lowercase : List[str] = VOCAB_FILES_NAMES __lowercase : str = PRETRAINED_VOCAB_FILES_MAP __lowercase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : Optional[int] = BigBirdTokenizer __lowercase : Union[str, Any] = ["input_ids", "attention_mask"] __lowercase : List[int] = [] def __init__( self , A_=None , A_=None , A_="<unk>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="[SEP]" , A_="[MASK]" , A_="[CLS]" , **A_ , ) -> List[str]: """simple docstring""" UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token # Mask token behave like a normal word, i.e. include the space before it UpperCamelCase = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token super().__init__( A_ , tokenizer_file=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , **A_ , ) UpperCamelCase = vocab_file UpperCamelCase = False if not self.vocab_file else True def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' 
) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1] def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]: """simple docstring""" UpperCamelCase = [self.sep_token_id] UpperCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(A_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return UpperCamelCase = os.path.join( A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ): copyfile(self.vocab_file , A_ ) return (out_vocab_file,)
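The three sequence-pair helpers above all produce the layout [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1 over the second. A library-free sketch of that layout, using hypothetical token ids:

cls_id, sep_id = 101, 102                    # hypothetical ids, for illustration only
a, b = [7, 8, 9], [4, 5]

input_ids = [cls_id] + a + [sep_id] + b + [sep_id]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
special_tokens_mask = [1] + [0] * len(a) + [1] + [0] * len(b) + [1]

assert len(input_ids) == len(token_type_ids) == len(special_tokens_mask) == 8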
222
from unittest import TestCase

from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
222
1
'''simple docstring''' import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __lowerCAmelCase : str ="base_with_context" def UpperCamelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict ): A__ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) A__ = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__lowerCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): A__ = weights[F"layers_{lyr_num}"] A__ = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) A__ = ly_weight["""attention"""] A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] ): A__ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) A__ = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__lowerCAmelCase ) for lyr_num, lyr in enumerate(model.encoders ): A__ = weights[F"layers_{lyr_num}"] A__ = ly_weight["""attention"""] A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A__ = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) A__ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def UpperCamelCase ( _lowerCamelCase : Any , _lowerCamelCase : List[Any] ): A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) A__ = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__lowerCAmelCase ) A__ = nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) for lyr_num, lyr in enumerate(model.decoders ): A__ = weights[F"layers_{lyr_num}"] A__ = nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) A__ = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) A__ = 
ly_weight["""self_attention"""] A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A__ = ly_weight["""MultiHeadDotProductAttention_0"""] A__ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) A__ = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) A__ = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) A__ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) A__ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def UpperCamelCase ( _lowerCamelCase : Tuple ): A__ = checkpoints.load_tax_checkpoint(args.checkpoint_path ) A__ = jnp.tree_util.tree_map(onp.array , __lowerCAmelCase ) A__ = [ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] A__ = os.path.join(args.checkpoint_path , ".." 
, "config.gin" ) A__ = inference.parse_training_gin_file(__lowerCAmelCase , __lowerCAmelCase ) A__ = inference.InferenceModel(args.checkpoint_path , __lowerCAmelCase ) A__ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) A__ = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) A__ = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) A__ = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) A__ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , __lowerCAmelCase ) A__ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , __lowerCAmelCase ) A__ = load_decoder(ta_checkpoint["target"]["decoder"] , __lowerCAmelCase ) A__ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) A__ = SpectrogramDiffusionPipeline( notes_encoder=__lowerCAmelCase , continuous_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase , scheduler=__lowerCAmelCase , melgan=__lowerCAmelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __lowerCAmelCase : str =argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=f"""{MODEL}/checkpoint_500000""", type=str, required=False, help="Path to the original jax model checkpoint.", ) __lowerCAmelCase : List[str] =parser.parse_args() main(args)
362
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : Any =logging.get_logger(__name__) def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : Tuple=False ): A__ = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", 
F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") ) rename_keys.append((F"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", F"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" A__ = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def UpperCamelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=False ): for i in range(config.num_hidden_layers ): if base_model: A__ = "" else: A__ = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) A__ = state_dict.pop(F"blocks.{i}.attn.qkv.weight" ) A__ = state_dict.pop(F"blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict A__ = in_proj_weight[ : config.hidden_size, : ] A__ = in_proj_bias[: config.hidden_size] A__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] A__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] A__ = in_proj_weight[ -config.hidden_size :, : ] A__ = in_proj_bias[-config.hidden_size :] def UpperCamelCase ( _lowerCamelCase : Any ): A__ = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(_lowerCamelCase , _lowerCamelCase ) def UpperCamelCase ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any] ): A__ = dct.pop(_lowerCamelCase ) A__ = val def UpperCamelCase ( ): A__ = "http://images.cocodataset.org/val2017/000000039769.jpg" A__ = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : int=False ): A__ = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=_lowerCamelCase , ) A__ = ViTHybridConfig(backbone_config=_lowerCamelCase , image_size=3_84 , num_labels=10_00 ) A__ = False # load original model from timm A__ = timm.create_model(_lowerCamelCase , pretrained=_lowerCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys A__ = timm_model.state_dict() if base_model: remove_classification_head_(_lowerCamelCase ) A__ = create_rename_keys(_lowerCamelCase , _lowerCamelCase ) for src, dest in rename_keys: rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) read_in_q_k_v(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) A__ = "huggingface/label-files" A__ = "imagenet-1k-id2label.json" A__ = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) ) A__ = {int(_lowerCamelCase ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": A__ = ViTHybridModel(_lowerCamelCase ).eval() else: A__ = ViTHybridForImageClassification(_lowerCamelCase ).eval() model.load_state_dict(_lowerCamelCase ) # create image processor A__ = create_transform(**resolve_data_config({} , model=_lowerCamelCase ) ) A__ = transform.transforms A__ = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } A__ = ViTHybridImageProcessor( do_resize=_lowerCamelCase , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=_lowerCamelCase , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=_lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) A__ = prepare_img() A__ = transform(_lowerCamelCase ).unsqueeze(0 ) A__ = processor(_lowerCamelCase , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(_lowerCamelCase , _lowerCamelCase ) # verify logits with torch.no_grad(): A__ = model(_lowerCamelCase ) A__ = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: A__ = 
timm_model.forward_features(_lowerCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(_lowerCamelCase , outputs.pooler_output , atol=1e-3 ) else: A__ = timm_model(_lowerCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(_lowerCamelCase , outputs.logits , atol=1e-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) print(F"Saving model {vit_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(_lowerCamelCase ) print(F"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(_lowerCamelCase ) if push_to_hub: print(F"Pushing model and processor to the hub {vit_name}" ) model.push_to_hub(F"ybelkada/{vit_name}" ) processor.push_to_hub(F"ybelkada/{vit_name}" ) if __name__ == "__main__": __lowerCAmelCase : Dict =argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_r50_s16_384", type=str, help="Name of the hybrid ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) __lowerCAmelCase : Optional[Any] =parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
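After conversion, a short smoke test might look like the following. This is illustrative only: it reuses the processor/model pairing the script above verifies, and the dump folder path is a placeholder.

import requests
import torch
from PIL import Image

from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained("./vit_hybrid_dump")  # assumed dump path
model = ViTHybridForImageClassification.from_pretrained("./vit_hybrid_dump")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])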
123
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class a__( unittest.TestCase ): @slow def lowercase_ ( self : List[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[int] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = TFAutoModel.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Tuple = AutoModel.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Union[str, Any] = TFAutoModelForPreTraining.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = AutoModelForPreTraining.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Union[str, Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForCausalLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Any = TFAutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForCausalLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForCausalLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) 
self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Any ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[str] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[int] ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : List[str] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Optional[Any] = TFAutoModelForMaskedLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Optional[int] = TFAutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = AutoModelForMaskedLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : Tuple = AutoModelForMaskedLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : int ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : str = TFAutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_pt=__snake_case ) a , a : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSeqaSeqLM.from_pretrained(__snake_case , from_tf=__snake_case ) a , a : str = AutoModelForSeqaSeqLM.from_pretrained( __snake_case , output_loading_info=__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : Optional[Any] ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Tuple = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : Dict = AutoModelForSequenceClassification.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowercase_ ( self : str ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: a : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) a : int = TFAutoModelForQuestionAnswering.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , 
__snake_case ) a : Tuple = AutoModelForQuestionAnswering.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) def lowercase_ ( self : Tuple ): a : List[Any] = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[int] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) def lowercase_ ( self : Any ): a : int = TFAutoModelWithLMHead.from_pretrained(__snake_case , from_pt=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 ) a : Optional[Any] = AutoModelWithLMHead.from_pretrained(__snake_case , from_tf=__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__snake_case ) , 1_44_10 )
297
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class a__( lowerCamelCase__ ): def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ): a : Union[str, Any] = tokenizer a : Union[str, Any] = dataset a : Any = len(__snake_case ) if n_tasks is None else n_tasks a : List[str] = n_copies def __iter__( self : str ): a : List[Any] = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() ) a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class a__( lowerCamelCase__ ): def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ): a : Dict = start_length a : Dict = eof_strings a : str = tokenizer def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ): a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) a : Optional[int] = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(__snake_case ) def lowerCamelCase__ ( _A ): a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A ) # last string should be "" return "".join(string_list[:-2] ) def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ): a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens for step, batch in tqdm(enumerate(_A ) ): with torch.no_grad(): a : Optional[Any] = batch['ids'].shape[-1] a : Optional[Any] = accelerator.unwrap_model(_A ).generate( input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A ) # each task is generated batch_size times a : Tuple = batch['task_id'].repeat(_A ) a : List[Any] = accelerator.pad_across_processes( _A , dim=1 , pad_index=tokenizer.pad_token_id ) a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) ) a : List[str] = generated_tokens.cpu().numpy() a : int = generated_tasks.cpu().numpy() for task, generated_tokens in zip(_A , _A ): gen_token_dict[task].append(_A ) a : Any = [[] for _ in range(_A )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A ) code_gens[task].append(remove_last_block(_A ) ) return code_gens def lowerCamelCase__ ( ): # Setup configuration a : Dict = HfArgumentParser(_A ) a : Any = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric a : List[Any] = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice 
with multiprocessing a : int = 'false' if args.num_workers is None: a : Dict = multiprocessing.cpu_count() # Use dataset load to feed to accelerate a : List[Any] = Accelerator() set_seed(args.seed , device_specific=_A ) # Load model and tokenizer a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt ) a : str = tokenizer.eos_token a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings a : Optional[Any] = { 'do_sample': args.do_sample, 'temperature': args.temperature, 'max_new_tokens': args.max_new_tokens, 'top_p': args.top_p, 'top_k': args.top_k, 'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ), } # Load evaluation dataset and metric a : Optional[int] = load_dataset('openai_humaneval' ) a : Optional[Any] = load_metric('code_eval' ) a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] ) a : Optional[Any] = args.n_samples // args.batch_size a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A ) # do not confuse args.batch_size, which is actually the num_return_sequences a : int = DataLoader(_A , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: a : int = code_eval_metric.compute(references=[''] , predictions=[['']] ) except ValueError as exception: print( 'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`' ' flag to enable code evaluation.' ) raise exception a , a : int = accelerator.prepare(_A , _A ) a : int = complete_code( _A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , ) if accelerator.is_main_process: a : List[str] = [] for task in tqdm(range(_A ) ): a : int = human_eval['test'][task]['test'] a : int = f"""check({human_eval["test"][task]["entry_point"]})""" references.append('\n' + test_func + '\n' + entry_point ) # Evaluate completions with "code_eval" metric a , a : Tuple = code_eval_metric.compute( references=_A , predictions=_A , num_workers=args.num_workers ) print(f"""Results: {pass_at_k}""" ) # Save results to json file with open(args.output_file , 'w' ) as fp: json.dump(_A , _A ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
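For reference, the code_eval metric used above reports pass@k via the unbiased estimator from the Codex/HumanEval paper; a standalone version of that estimator is sketched here.

import numpy as np


def estimate_pass_at_k(n: int, c: int, k: int) -> float:
    # n = total generations per task, c = number that pass, k = evaluation budget.
    # pass@k = 1 - C(n - c, k) / C(n, k), computed as a numerically stable product.
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))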
297
1
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu

filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)


@require_torch
class ModelEvalTester(unittest.TestCase):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # Each pair is checked against a minimum BLEU score on a small validation batch.
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
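calculate_bleu is imported from a local utils module not shown here. A plausible implementation on top of sacrebleu, offered as a sketch rather than the actual helper:

import sacrebleu


def calculate_bleu(output_lns, refs_lns):
    # corpus_bleu takes the hypotheses plus a list of reference streams.
    return {"bleu": round(sacrebleu.corpus_bleu(output_lns, [refs_lns]).score, 4)}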
334
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
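What the _LazyModule indirection buys: importing the package stays cheap, and the heavy torch/TF modeling modules only load on first attribute access. An illustrative check (assuming an installed transformers with this package layout):

import transformers.models.speech_to_text as speech_to_text

config = speech_to_text.Speech2TextConfig()  # attribute access triggers the real import lazily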
334
1
import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ): '''simple docstring''' _UpperCAmelCase : Optional[Any] = VideoToVideoSDPipeline _UpperCAmelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"} _UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"} _UpperCAmelCase : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"} _UpperCAmelCase : str = False # No `output_type`. _UpperCAmelCase : List[str] = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def A ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) _snake_case = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=lowercase , set_alpha_to_one=lowercase , ) torch.manual_seed(0 ) _snake_case = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) _snake_case = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , ) _snake_case = CLIPTextModel(lowercase ) _snake_case = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) _snake_case = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def A ( self : str , lowercase : str , lowercase : List[str]=0 ): '''simple docstring''' _snake_case = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase ) if str(lowercase ).startswith('mps' ): _snake_case = torch.manual_seed(lowercase ) else: _snake_case = torch.Generator(device=lowercase ).manual_seed(lowercase ) _snake_case = { 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def A ( self : Any ): '''simple docstring''' _snake_case = 'cpu' # ensure determinism for the device-dependent torch.Generator _snake_case = self.get_dummy_components() _snake_case = VideoToVideoSDPipeline(**lowercase ) _snake_case = sd_pipe.to(lowercase ) sd_pipe.set_progress_bar_config(disable=lowercase ) 
_snake_case = self.get_dummy_inputs(lowercase ) _snake_case = 'np' _snake_case = sd_pipe(**lowercase ).frames _snake_case = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) _snake_case = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def A ( self : Tuple ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowercase , expected_max_diff=5E-3 ) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def A ( self : Optional[Any] ): '''simple docstring''' pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' ) def A ( self : Union[str, Any] ): '''simple docstring''' pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' ) def A ( self : Tuple ): '''simple docstring''' pass def A ( self : Dict ): '''simple docstring''' return super().test_progress_bar() @slow @skip_mps class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): '''simple docstring''' def A ( self : List[Any] ): '''simple docstring''' _snake_case = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames _snake_case = torch.Generator(device='cpu' ).manual_seed(0 ) _snake_case = torch.randn((1, 10, 3, 1_024, 576) , generator=lowercase ) _snake_case = video.to('cuda' ) _snake_case = 'Spiderman is surfing' _snake_case = pipe(lowercase , video=lowercase , generator=lowercase , num_inference_steps=3 , output_type='pt' ).frames _snake_case = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
282
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def a_ ( __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any]=None ) -> Any: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match''' _snake_case = nn.Parameter(__lowercase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match''' _snake_case = nn.Parameter(__lowercase ) def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]: # set torch weights for 1-to-1 comparison _snake_case = np.asarray(weights[0] ) _snake_case = np.asarray(weights[1] ) _snake_case = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , ) set_param( torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , ) def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]: # set torch weights for 1-to-1 comparison _snake_case = np.asarray(weights[0] ) _snake_case = np.asarray(weights[1] ) _snake_case = np.asarray(weights[2] ) _snake_case = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , ) set_param( torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , ) set_param( torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , ) def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]: # layernorm 1 _snake_case = weights[0][0][0] _snake_case = np.asarray(layer_norm_a[0] ) _snake_case = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , ) # lsh weights + output _snake_case = weights[0][1] if len(__lowercase ) < 4: set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase ) else: set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase ) # intermediate weighs _snake_case = weights[2][0][1][2] # Chunked Feed Forward if len(__lowercase ) == 4: _snake_case = intermediate_weights[2] # layernorm 2 _snake_case = np.asarray(intermediate_weights[0][0] ) _snake_case = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , ) # intermediate dense _snake_case = np.asarray(intermediate_weights[1][0] ) _snake_case = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , ) # intermediate out _snake_case = np.asarray(intermediate_weights[4][0] ) _snake_case = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() 
, torch.tensor(__lowercase ) , ) def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]: # reformer model _snake_case = torch_model.reformer # word embeds _snake_case = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , ) if isinstance(weights[3] , __lowercase ): _snake_case = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): _snake_case = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f'''{position_embeddings[emb_idx]} emb does not match''' _snake_case = nn.Parameter(torch.tensor(__lowercase ) ) _snake_case = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( __lowercase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): _snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(__lowercase , __lowercase , __lowercase ) # output layer norm _snake_case = np.asarray(weights[7][0] ) _snake_case = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , ) # output embeddings _snake_case = np.asarray(weights[9][0] ) _snake_case = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , ) def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]: # Initialise PyTorch model _snake_case = ReformerConfig.from_json_file(__lowercase ) print(f'''Building PyTorch model from configuration: {config}''' ) _snake_case = ReformerModelWithLMHead(__lowercase ) with open(__lowercase , 'rb' ) as f: _snake_case = pickle.load(__lowercase )['weights'] set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , __lowercase ) if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained Reformer model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowerCamelCase : List[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
282
1
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging


logger = logging.get_logger(__name__)


class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)


ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}


def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
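A quick, illustrative way to inspect the registry defined above, using only names from this file:

# Illustrative: list each registered backend and how to install it if missing.
for hp_backend, backend_cls in ALL_HYPERPARAMETER_SEARCH_BACKENDS.items():
    status = "available" if backend_cls.is_available() else f"missing, run {backend_cls.pip_install()}"
    print(f"{hp_backend}: {status}")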
358
from __future__ import annotations


def all_unique(values: list) -> bool:
    """Return True if every element of the list is distinct.

    >>> all_unique([1, 2, 3])
    True
    >>> all_unique([1, 2, 2])
    False
    """
    return len(set(values)) == len(values)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
162
0
"""simple docstring""" import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from tensorflow.python.eager import context from tensorflow.python.framework import ops from transformers import GradientAccumulator, create_optimizer @require_tf class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str ): '''simple docstring''' self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) ) for a, b in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertAlmostEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , delta=SCREAMING_SNAKE_CASE_ ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : int = GradientAccumulator() accumulator([tf.constant([1.0, 2.0] )] ) accumulator([tf.constant([-2.0, 1.0] )] ) accumulator([tf.constant([-1.0, 2.0] )] ) with self.assertRaises(SCREAMING_SNAKE_CASE_ ): accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] ) self.assertEqual(accumulator.step , 3 ) self.assertEqual(len(accumulator.gradients ) , 1 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = None ops.enable_eager_execution_internal() __UpperCAmelCase : Optional[Any] = tf.config.list_physical_devices("""CPU""" ) if len(SCREAMING_SNAKE_CASE_ ) == 1: tf.config.set_logical_device_configuration( physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] ) __UpperCAmelCase : Any = tf.config.list_logical_devices(device_type="""CPU""" ) __UpperCAmelCase : int = tf.distribute.MirroredStrategy(devices=devices[:2] ) with strategy.scope(): __UpperCAmelCase : Union[str, Any] = GradientAccumulator() __UpperCAmelCase : str = tf.Variable([4.0, 3.0] ) __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = create_optimizer(5e-5 , 10 , 5 ) __UpperCAmelCase : str = tf.Variable([0.0, 0.0] , trainable=SCREAMING_SNAKE_CASE_ ) def accumulate_on_replica(UpperCamelCase : Optional[int] ): accumulator([gradient] ) def apply_on_replica(): optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) ) @tf.function def accumulate(UpperCamelCase : str , UpperCamelCase : Dict ): with strategy.scope(): __UpperCAmelCase : List[Any] = strategy.experimental_local_results(SCREAMING_SNAKE_CASE_ ) local_variables[0].assign(SCREAMING_SNAKE_CASE_ ) local_variables[1].assign(SCREAMING_SNAKE_CASE_ ) strategy.run(SCREAMING_SNAKE_CASE_ , args=(gradient_placeholder,) ) @tf.function def apply_grad(): with strategy.scope(): strategy.run(SCREAMING_SNAKE_CASE_ ) def _check_local_values(UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] ): __UpperCAmelCase : Optional[Any] = strategy.experimental_local_results(accumulator._gradients[0] ) self.assertListAlmostEqual(values[0].value() , SCREAMING_SNAKE_CASE_ , tol=1e-2 ) self.assertListAlmostEqual(values[1].value() , SCREAMING_SNAKE_CASE_ , tol=1e-2 ) accumulate([1.0, 2.0] , [-1.0, 1.0] ) accumulate([3.0, -1.0] , [-1.0, -1.0] ) accumulate([-2.0, 2.0] , [3.0, -2.0] ) self.assertEqual(accumulator.step , 3 ) _check_local_values([2.0, 3.0] , [1.0, -2.0] ) apply_grad() 
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 ) accumulator.reset() self.assertEqual(accumulator.step , 0 ) _check_local_values([0.0, 0.0] , [0.0, 0.0] )
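For orientation, here is a conceptual sketch of the accumulator behavior the tests above exercise; it is not the transformers implementation, just a minimal stand-in with the same observable contract (running per-variable sums, a step counter, a structure check that raises, and an in-place reset).

import tensorflow as tf


class SimpleAccumulator:
    def __init__(self):
        self.step = 0
        self._grads = None

    def __call__(self, grads):
        # Lazily create one zero-initialized slot per gradient on the first call.
        if self._grads is None:
            self._grads = [tf.Variable(tf.zeros_like(g), trainable=False) for g in grads]
        if len(grads) != len(self._grads):
            raise ValueError(f"Expected {len(self._grads)} gradients, but got {len(grads)}")
        for accum, grad in zip(self._grads, grads):
            accum.assign_add(grad)
        self.step += 1

    @property
    def gradients(self):
        return list(self._grads)

    def reset(self):
        # Zero the slots but keep them allocated, and restart the step counter.
        if self._grads is None:
            return
        self.step = 0
        for accum in self._grads:
            accum.assign(tf.zeros_like(accum))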
115
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 lowerCamelCase_ = data_utils.TransfoXLTokenizer lowerCamelCase_ = data_utils.TransfoXLCorpus lowerCamelCase_ = data_utils lowerCamelCase_ = data_utils def __magic_name__ ( __a : List[Any] , __a : str , __a : Optional[Any] , __a : List[str] ): '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a , """rb""" ) as fp: UpperCamelCase__ = pickle.load(__a , encoding="""latin1""" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) UpperCamelCase__ = pytorch_dump_folder_path + """/""" + VOCAB_FILES_NAMES["""pretrained_vocab_file"""] print(f"Save vocabulary to {pytorch_vocab_dump_path}" ) UpperCamelCase__ = corpus.vocab.__dict__ torch.save(__a , __a ) UpperCamelCase__ = corpus.__dict__ corpus_dict_no_vocab.pop("""vocab""" , __a ) UpperCamelCase__ = pytorch_dump_folder_path + """/""" + CORPUS_NAME print(f"Save dataset to {pytorch_dataset_dump_path}" ) torch.save(__a , __a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model UpperCamelCase__ = os.path.abspath(__a ) UpperCamelCase__ = os.path.abspath(__a ) print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." ) # Initialise PyTorch model if transfo_xl_config_file == "": UpperCamelCase__ = TransfoXLConfig() else: UpperCamelCase__ = TransfoXLConfig.from_json_file(__a ) print(f"Building PyTorch model from configuration: {config}" ) UpperCamelCase__ = TransfoXLLMHeadModel(__a ) UpperCamelCase__ = load_tf_weights_in_transfo_xl(__a , __a , __a ) # Save pytorch-model UpperCamelCase__ = os.path.join(__a , __a ) UpperCamelCase__ = os.path.join(__a , __a ) print(f"Save PyTorch model to {os.path.abspath(__a )}" ) torch.save(model.state_dict() , __a ) print(f"Save configuration file to {os.path.abspath(__a )}" ) with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) lowerCamelCase_ = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
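An invocation sketch mirroring the CLI call at the bottom of the script; all paths are placeholders, and the arguments follow the same positional order as that call.

# Hypothetical direct call: (tf_checkpoint_path, transfo_xl_config_file,
# pytorch_dump_folder_path, transfo_xl_dataset_file).
convert_transfo_xl_checkpoint_to_pytorch(
    "",                   # tf_checkpoint_path: optional TF checkpoint to convert
    "",                   # transfo_xl_config_file: empty string means the default TransfoXLConfig
    "./transfo_xl_dump",  # pytorch_dump_folder_path
    "./corpus_pickle",    # transfo_xl_dataset_file: optional pre-processed corpus to convert
)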
244
0
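A minimal sketch of driving the Transformer-XL converter above from Python rather than the CLI. Both paths are hypothetical; leaving the TF-checkpoint arguments empty exercises only the corpus/vocab branch, mirroring the parser defaults.

convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="",                         # empty skips the TF-conversion branch
    transfo_xl_config_file="",                     # empty means the default TransfoXLConfig
    pytorch_dump_folder_path="./transfo-xl-dump",  # hypothetical output folder
    transfo_xl_dataset_file="./corpus-info.pkl",   # hypothetical pickled corpus to convert
)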
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() __a = logging.get_logger(__name__) __a = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' for attribute in key.split('''.''' ): UpperCAmelCase_ = getattr(_lowercase , _lowercase ) if weight_type is not None: UpperCAmelCase_ = getattr(_lowercase , _lowercase ).shape else: UpperCAmelCase_ = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": UpperCAmelCase_ = value elif weight_type == "weight_g": UpperCAmelCase_ = value elif weight_type == "weight_v": UpperCAmelCase_ = value elif weight_type == "bias": UpperCAmelCase_ = value else: UpperCAmelCase_ = value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ = [] UpperCAmelCase_ = fairseq_model.state_dict() UpperCAmelCase_ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase_ = False if "conv_layers" in name: load_conv_layer( _lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == '''group''' , ) UpperCAmelCase_ = True else: for key, mapped_key in MAPPING.items(): UpperCAmelCase_ = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): UpperCAmelCase_ = True if "*" in mapped_key: UpperCAmelCase_ = name.split(_lowercase )[0].split('''.''' )[-2] UpperCAmelCase_ = mapped_key.replace('''*''' , _lowercase ) if "weight_g" in name: UpperCAmelCase_ = '''weight_g''' elif "weight_v" in name: UpperCAmelCase_ = '''weight_v''' elif "weight" in name: UpperCAmelCase_ = '''weight''' elif "bias" in name: UpperCAmelCase_ = '''bias''' else: UpperCAmelCase_ = None set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) continue if not is_used: unused_weights.append(_lowercase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase_ = name.split('''.''' ) UpperCAmelCase_ = int(items[0] ) UpperCAmelCase_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) UpperCAmelCase_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) UpperCAmelCase_ = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) UpperCAmelCase_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) UpperCAmelCase_ = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_lowercase ) @torch.no_grad() def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None , _lowercase=None , _lowercase=True ): '''simple docstring''' if config_path is not None: UpperCAmelCase_ = HubertConfig.from_pretrained(_lowercase ) else: UpperCAmelCase_ = HubertConfig() if is_finetuned: if dict_path: UpperCAmelCase_ = Dictionary.load(_lowercase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCAmelCase_ = target_dict.pad_index UpperCAmelCase_ = target_dict.bos_index UpperCAmelCase_ = target_dict.eos_index UpperCAmelCase_ = len(target_dict.symbols ) UpperCAmelCase_ = os.path.join(_lowercase , '''vocab.json''' ) if not os.path.isdir(_lowercase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(_lowercase ) ) return os.makedirs(_lowercase , exist_ok=_lowercase ) with open(_lowercase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , _lowercase ) UpperCAmelCase_ = WavaVecaCTCTokenizer( _lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=_lowercase , ) UpperCAmelCase_ = True if config.feat_extract_norm == '''layer''' else False UpperCAmelCase_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowercase , return_attention_mask=_lowercase , ) UpperCAmelCase_ = WavaVecaProcessor(feature_extractor=_lowercase , tokenizer=_lowercase ) processor.save_pretrained(_lowercase ) UpperCAmelCase_ = HubertForCTC(_lowercase ) else: UpperCAmelCase_ = HubertModel(_lowercase ) if is_finetuned: UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) UpperCAmelCase_ = model[0].eval() recursively_load_weights(_lowercase , _lowercase , _lowercase ) hf_wavavec.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) __a = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
350
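A self-contained sketch of the wildcard substitution used by the weight-loading loop above: when a mapped key contains "*", the encoder layer index is parsed out of the fairseq parameter name and spliced in. The example parameter name is hypothetical but follows the fairseq layout the MAPPING dict expects.

# Mirrors: name.split(key)[0].split(".")[-2] followed by mapped_key.replace("*", ...)
key = "self_attn.k_proj"
mapped_key = "encoder.layers.*.attention.k_proj"
name = "encoder.layers.3.self_attn.k_proj.weight"  # hypothetical fairseq parameter name
layer_index = name.split(key)[0].split(".")[-2]    # -> "3"
print(mapped_key.replace("*", layer_index))        # encoder.layers.3.attention.k_proj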
import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __a = logging.getLogger() def lowerCamelCase__ ( ): '''simple docstring''' UpperCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument('''-f''' ) UpperCAmelCase_ : Dict = parser.parse_args() return args.f class __a( _a ): """simple docstring""" def a__ ( self ) -> None: UpperCAmelCase_ : int = logging.StreamHandler(sys.stdout ) logger.addHandler(_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str: UpperCAmelCase_ : int = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0 ,'''run_glue_deebert.py''' ) with patch.object(_SCREAMING_SNAKE_CASE ,'''argv''' ,_SCREAMING_SNAKE_CASE ): UpperCAmelCase_ : List[str] = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(_SCREAMING_SNAKE_CASE ,0.6_66 ) @slow @require_torch_non_multi_gpu def a__ ( self ) -> List[str]: UpperCAmelCase_ : List[Any] = ''' --model_type roberta --model_name_or_path roberta-base --task_name MRPC --do_train --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --max_seq_length 128 --per_gpu_eval_batch_size=1 --per_gpu_train_batch_size=8 --learning_rate 2e-4 --num_train_epochs 3 --overwrite_output_dir --seed 42 --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --save_steps 0 --overwrite_cache --eval_after_first_stage '''.split() self.run_and_check(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --eval_each_highway --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Any = ''' --model_type roberta --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --task_name MRPC --do_eval --do_lower_case --data_dir ./tests/fixtures/tests_samples/MRPC/ --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage --plot_data_dir ./examples/deebert/results/ --max_seq_length 128 --early_exit_entropy 0.1 --eval_highway --overwrite_cache --per_gpu_eval_batch_size=1 '''.split() self.run_and_check(_SCREAMING_SNAKE_CASE )
235
0
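The test above drives run_glue_deebert.main() by patching sys.argv before the call. A minimal, self-contained sketch of that pattern; the main() here is a stand-in for the deebert script, not the real one.

import argparse
import sys
from unittest.mock import patch

def main() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument("--seed", type=int)
    return parser.parse_args().seed  # argparse reads sys.argv[1:] by default

testargs = ["prog.py", "--seed", "42"]
with patch.object(sys, "argv", testargs):
    assert main() == 42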
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase=7 ) -> Any: '''simple docstring''' lowercase_ = None if token is not None: lowercase_ = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''} # The id of a workflow (not of a workflow run) lowercase_ = """636036""" lowercase_ = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' lowercase_ = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).json() return result["workflow_runs"] def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ = get_daily_ci_runs(__lowerCAmelCase ) lowercase_ = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": lowercase_ = workflow_run["""id"""] break return workflow_run_id def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int: '''simple docstring''' lowercase_ = get_last_daily_ci_runs(__lowerCAmelCase ) if workflow_run_id is not None: lowercase_ = get_artifacts_links(worflow_run_id=__lowerCAmelCase , token=__lowerCAmelCase ) for artifact_name in artifact_names: if artifact_name in artifacts_links: lowercase_ = artifacts_links[artifact_name] download_artifact( artifact_name=__lowerCAmelCase , artifact_url=__lowerCAmelCase , output_dir=__lowerCAmelCase , token=__lowerCAmelCase ) def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Any: '''simple docstring''' get_last_daily_ci_artifacts(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) lowercase_ = {} for artifact_name in artifact_names: lowercase_ = os.path.join(__lowerCAmelCase , F'''{artifact_name}.zip''' ) if os.path.isfile(__lowerCAmelCase ): lowercase_ = {} with zipfile.ZipFile(__lowerCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__lowerCAmelCase ): # read the file with z.open(__lowerCAmelCase ) as f: lowercase_ = f.read().decode("""UTF-8""" ) return results
136
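A standalone sketch of the artifact-reading loop above, runnable on any local archive; the zip path is hypothetical.

import zipfile

results = {}
with zipfile.ZipFile("ci_artifact.zip") as z:   # hypothetical downloaded artifact
    for filename in z.namelist():
        if not filename.endswith("/"):          # skip directory entries
            with z.open(filename) as f:
                results[filename] = f.read().decode("UTF-8")
print(sorted(results))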
"""simple docstring""" def A ( snake_case :int ) -> int: __UpperCamelCase = [1] __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 0, 0, 0 __UpperCamelCase = ugly_nums[ia] * 2 __UpperCamelCase = ugly_nums[ia] * 3 __UpperCamelCase = ugly_nums[ia] * 5 for _ in range(1 , snake_case ): __UpperCamelCase = min(snake_case , snake_case , snake_case ) ugly_nums.append(snake_case ) if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __UpperCamelCase = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'''{ugly_numbers(2_0_0) = }''')
316
0
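A quick check of the three-pointer merge above: the sequence admits only 2, 3 and 5 as prime factors, so 7 and 11 are skipped.

print([ugly_numbers(i) for i in range(1, 11)])  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]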
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __snake_case : Union[str, Any] = logging.get_logger(__name__) __snake_case : Optional[Any] = {'vocab_file': 'vocab.txt'} __snake_case : Optional[int] = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __snake_case : Dict = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __snake_case : List[str] = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class lowerCamelCase ( lowercase_ ): '''simple docstring''' __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_INIT_CONFIGURATION __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = ConvBertTokenizer def __init__( self : str , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[Any]="[UNK]" , lowerCAmelCase_ : Tuple="[SEP]" , lowerCAmelCase_ : Any="[PAD]" , lowerCAmelCase_ : Optional[Any]="[CLS]" , lowerCAmelCase_ : str="[MASK]" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Any , ) -> Tuple: '''simple docstring''' super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , do_lower_case=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , tokenize_chinese_chars=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ , **lowerCAmelCase_ , ) A__ : int =json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , lowerCAmelCase_ ) != do_lower_case or normalizer_state.get("""strip_accents""" , lowerCAmelCase_ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , lowerCAmelCase_ ) != tokenize_chinese_chars ): A__ : Union[str, Any] =getattr(lowerCAmelCase_ , normalizer_state.pop("""type""" ) ) A__ : List[str] =do_lower_case A__ : List[Any] =strip_accents A__ : List[Any] =tokenize_chinese_chars A__ : Dict =normalizer_class(**lowerCAmelCase_ ) A__ : Union[str, Any] =do_lower_case def lowercase__ ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict=None ) -> Union[str, Any]: '''simple docstring''' A__ : Union[str, Any] =[self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' A__ : Optional[Any] =[self.sep_token_id] A__ : List[Any] =[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowercase__ ( self : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' A__ : Tuple 
=self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ )
136
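A minimal usage sketch for the fast tokenizer defined above; it pulls the YituTech/conv-bert-base files referenced in the vocab map on first use.

from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
encoded = tokenizer("Hello world", return_tensors="pt")
print(encoded["input_ids"])                       # [CLS] ... [SEP] ids
print(tokenizer.decode(encoded["input_ids"][0]))  # round-trip back to text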
'''simple docstring''' import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __snake_case : List[str] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCamelCase ( lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = XLMRobertaTokenizer __snake_case = XLMRobertaTokenizerFast __snake_case = True __snake_case = True def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing A__ : Any =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Union[str, Any] ="""<pad>""" A__ : Any =1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def lowercase__ ( self : List[Any] ) -> Any: '''simple docstring''' A__ : int =list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(vocab_keys[-1] , """<mask>""" ) self.assertEqual(len(lowerCAmelCase_ ) , 10_02 ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 10_02 ) def lowercase__ ( self : Tuple ) -> Any: '''simple docstring''' A__ : List[Any] =XLMRobertaTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) A__ : Tuple =tokenizer.tokenize("""This is a test""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) A__ : Optional[int] =tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) A__ : Optional[int] =tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) A__ : Union[str, Any] =tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def lowercase__ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' 
if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return A__ : Dict =(self.rust_tokenizer_class, """hf-internal-testing/tiny-xlm-roberta""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): A__ : List[str] =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Union[str, Any] =self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) A__ : Optional[Any] =tempfile.mkdtemp() A__ : Union[str, Any] =tokenizer_r.save_pretrained(lowerCAmelCase_ ) A__ : Union[str, Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) A__ : List[str] =tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Checks everything loads correctly in the same way A__ : Any =tokenizer_r.from_pretrained(lowerCAmelCase_ ) A__ : Union[str, Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase_ ) # Save tokenizer rust, legacy_format=True A__ : List[str] =tempfile.mkdtemp() A__ : List[str] =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ ) A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ ) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Checks everything loads correctly in the same way A__ : str =tokenizer_r.from_pretrained(lowerCAmelCase_ ) A__ : List[Any] =tokenizer_p.from_pretrained(lowerCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) ) shutil.rmtree(lowerCAmelCase_ ) # Save tokenizer rust, legacy_format=False A__ : List[str] =tempfile.mkdtemp() A__ : Dict =tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ ) A__ : List[Any] =tokenizer_p.save_pretrained(lowerCAmelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way A__ : Optional[int] =tokenizer_r.from_pretrained(lowerCAmelCase_ ) A__ : str =tokenizer_p.from_pretrained(lowerCAmelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) ) shutil.rmtree(lowerCAmelCase_ ) @cached_property def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' return XLMRobertaTokenizer.from_pretrained("""xlm-roberta-base""" ) def lowercase__ ( self : Tuple ) -> Tuple: '''simple docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase_ , f.name ) A__ : Dict =XLMRobertaTokenizer(f.name , keep_accents=lowerCAmelCase_ ) A__ : Optional[Any] =pickle.dumps(lowerCAmelCase_ ) pickle.loads(lowerCAmelCase_ ) def lowercase__ ( self : Optional[Any] ) -> List[Any]: '''simple 
docstring''' if not self.test_rust_tokenizer: return A__ : Any =self.get_tokenizer() A__ : Any =self.get_rust_tokenizer() A__ : Optional[Any] ="""I was born in 92000, and this is falsé.""" A__ : List[str] =tokenizer.tokenize(lowerCAmelCase_ ) A__ : int =rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : str =tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) A__ : Dict =rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) A__ : Union[str, Any] =self.get_rust_tokenizer() A__ : Union[str, Any] =tokenizer.encode(lowerCAmelCase_ ) A__ : Optional[Any] =rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @slow def lowercase__ ( self : Dict ) -> int: '''simple docstring''' A__ : Optional[Any] ="""Hello World!""" A__ : Optional[Any] =[0, 3_53_78, 66_61, 38, 2] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) ) @slow def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' A__ : List[Any] =( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) A__ : Optional[Any] =[ 0, 32_93, 83, 10, 45_52, 49_89, 79_86, 6_78, 10, 59_15, 1_11, 17_94_59, 12_48_50, 4, 60_44, 2_37, 12, 6, 5, 6, 4, 67_80, 7_05, 15, 13_88, 44, 3_78, 1_01_14, 7_11, 1_52, 20, 6, 5, 2_23_76, 6_42, 12_21, 1_51_90, 3_41_53, 4_50, 56_08, 9_59, 11_19, 5_77_02, 1_36, 1_86, 47, 10_98, 2_93_67, 47, # 4426, # What fairseq tokenizes from "<unk>": "_<" # 3678, # What fairseq tokenizes from "<unk>": "unk" # 2740, # What fairseq tokenizes from "<unk>": ">" 3, # What we tokenize from "<unk>": "<unk>" 6, # Residue from the tokenization: an extra sentencepiece underline 4, 60_44, 2_37, 62_84, 5_09_01, 5_28, 31, 90, 34, 9_27, 2, ] # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer # xlmr.eval() # xlmr.encode(symbols) self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) ) @slow def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' # fmt: off A__ : List[Any] ={"""input_ids""": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name="""xlm-roberta-base""" , revision="""d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3""" , )
136
1
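The assertions above rely on XLM-R's fairseq offset: every raw SentencePiece id is shifted so the control tokens <s>=0, <pad>=1, </s>=2, <unk>=3 sit at the front of the vocabulary. A sketch of the shift, assuming the offset of 1 that XLMRobertaTokenizer uses.

sp_ids = [285, 46, 10, 170, 382]             # raw SentencePiece ids for "This is a t est"
fairseq_offset = 1
print([i + fairseq_offset for i in sp_ids])  # the ids the tokenizer is expected to emit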
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of all almost-equilateral triangles (sides a, a, a +/- 1)
    with integral side lengths and area, whose perimeter does not exceed max_perimeter."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
317
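A sanity check of the recurrence above on a small bound: the first almost-equilateral triangles with integral area are (5, 5, 6), (17, 17, 16), (65, 65, 66) and (241, 241, 240), with perimeters 16, 50, 196 and 722.

print(solution(10**3))  # 984 == 16 + 50 + 196 + 722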
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
317
1
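A minimal sketch of instantiating the task template above. The column names passed in are hypothetical; column_mapping shows how they would be renamed to the canonical question/context/answers schema.

template = QuestionAnsweringExtractive(
    question_column="query", context_column="passage", answers_column="answers"
)
print(template.column_mapping)
# {'query': 'question', 'passage': 'context', 'answers': 'answers'}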
def solution(limit: int = 28123) -> int:
    """Return the sum of all positive integers up to `limit` that cannot be written
    as the sum of two abundant numbers (Project Euler problem 23)."""
    sum_divs = [1] * (limit + 1)  # proper-divisor sums; every n > 1 has divisor 1

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
362
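A quick check of the sieve above: the smallest abundant number is 12, so no integer below 24 can be a sum of two abundant numbers, and solution(23) must return the full sum 1 + 2 + ... + 23.

print(solution(23))  # 276 == sum(range(1, 24))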
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __A ( self: Union[str, Any] ) -> Union[str, Any]: _A = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() _A = dict(zip(__A , range(len(__A ) ) ) ) _A = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } _A = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_60_00, '''return_attention_mask''': False, '''do_normalize''': True, } _A = tempfile.mkdtemp() _A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _A = os.path.join(self.tmpdirname , __A ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__A ) + '''\n''' ) # load decoder from hub _A = '''hf-internal-testing/ngram-beam-search-decoder''' def __A ( self: Tuple , **__A: str ) -> str: _A = self.add_kwargs_tokens_map.copy() kwargs.update(__A ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__A ) def __A ( self: Any , **__A: List[Any] ) -> Union[str, Any]: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__A ) def __A ( self: List[Any] , **__A: Union[str, Any] ) -> int: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__A ) def __A ( self: List[str] ) -> Optional[int]: shutil.rmtree(self.tmpdirname ) def __A ( self: List[str] ) -> Optional[Any]: _A = self.get_tokenizer() _A = self.get_feature_extractor() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) processor.save_pretrained(self.tmpdirname ) _A = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , __A ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , __A ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , __A ) def __A ( self: Optional[int] ) -> Union[str, Any]: _A = 
WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match _A = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def __A ( self: str ) -> Any: _A = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(__A , '''include''' ): WavaVecaProcessorWithLM( tokenizer=__A , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def __A ( self: List[str] ) -> str: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = floats_list((3, 10_00) ) _A = feature_extractor(__A , return_tensors='''np''' ) _A = processor(__A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __A ( self: Union[str, Any] ) -> Optional[Any]: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = '''This is a test string''' _A = processor(text=__A ) _A = tokenizer(__A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __A ( self: List[str] , __A: Optional[int]=(2, 10, 16) , __A: Optional[int]=77 ) -> List[Any]: np.random.seed(__A ) return np.random.rand(*__A ) def __A ( self: List[Any] ) -> Optional[Any]: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits(shape=(10, 16) , seed=13 ) _A = processor.decode(__A ) _A = decoder.decode_beams(__A )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def __A ( self: str , __A: Any ) -> int: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: _A = processor.batch_decode(__A ) else: with get_context(__A ).Pool() as pool: _A = processor.batch_decode(__A , __A ) _A = list(__A ) with get_context('''fork''' ).Pool() as p: _A = decoder.decode_beams_batch(__A , __A ) _A ,_A ,_A = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(__A , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(__A , decoded_processor.logit_score ) self.assertListEqual(__A , decoded_processor.lm_score ) def __A ( self: Optional[Any] ) -> int: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits() _A = 15 _A = -20.0 _A = -4.0 _A = processor.batch_decode( __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , ) _A = decoded_processor_out.text _A = list(__A ) with get_context('''fork''' ).Pool() as pool: _A = decoder.decode_beams_batch( __A , __A , beam_width=__A , beam_prune_logp=__A , token_min_logp=__A , ) _A = [d[0][0] for d in decoded_decoder_out] _A = [d[0][2] for d in decoded_decoder_out] _A = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(__A , __A ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , __A ) self.assertTrue(np.array_equal(__A , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , __A , atol=1e-3 ) ) self.assertTrue(np.array_equal(__A , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , __A , atol=1e-3 ) ) def __A ( self: Optional[int] ) -> Dict: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) _A = self._get_dummy_logits() _A = 2.0 _A = 5.0 _A = -20.0 _A = True _A = processor.batch_decode( __A , alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , ) _A = decoded_processor_out.text _A = list(__A ) decoder.reset_params( alpha=__A , beta=__A , unk_score_offset=__A , lm_score_boundary=__A , ) with get_context('''fork''' ).Pool() as pool: _A = decoder.decode_beams_batch( __A , __A , ) _A = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(__A , __A ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , __A ) _A = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , __A ) def __A ( self: int ) -> Optional[Any]: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = processor.decoder.model_container[processor.decoder._model_key] _A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() _A = os.listdir(__A ) _A = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(__A , __A ) def __A ( self: Tuple ) -> Any: _A = snapshot_download('''hf-internal-testing/processor_with_lm''' ) _A = WavaVecaProcessorWithLM.from_pretrained(__A ) _A = processor.decoder.model_container[processor.decoder._model_key] _A = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() _A = os.listdir(__A ) _A = os.listdir(__A ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(__A , __A ) def __A ( self: List[str] ) -> Tuple: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = floats_list((3, 10_00) ) _A = processor_wavaveca(__A , return_tensors='''np''' ) _A = processor_auto(__A , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) _A = self._get_dummy_logits() _A = processor_wavaveca.batch_decode(__A ) _A = processor_auto.batch_decode(__A ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def __A ( self: Optional[int] ) -> Any: _A = self.get_feature_extractor() _A = self.get_tokenizer() _A = self.get_decoder() _A = WavaVecaProcessorWithLM(tokenizer=__A , feature_extractor=__A , decoder=__A ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def __A ( __A: int , __A: List[str] ) -> Union[str, Any]: _A = [d[key] for d in offsets] return retrieved_list def __A ( self: Optional[Any] ) -> int: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = self._get_dummy_logits()[0] _A = processor.decode(__A , output_word_offsets=__A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__A , __A ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def __A ( self: Optional[Any] ) -> Tuple: _A = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) _A = self._get_dummy_logits() _A = processor.batch_decode(__A , output_word_offsets=__A ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(__A , __A ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(__A , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch 
@require_torchaudio def __A ( self: Optional[Any] ) -> Optional[Any]: import torch _A = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=__A ) _A = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) ) _A = iter(__A ) _A = next(__A ) _A = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) _A = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train _A = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): _A = model(__A ).logits.cpu().numpy() _A = processor.decode(logits[0] , output_word_offsets=__A ) _A = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate _A = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] _A = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , __A ) self.assertEqual(''' '''.join(self.get_from_offsets(__A , '''word''' ) ) , output.text ) # output times _A = torch.tensor(self.get_from_offsets(__A , '''start_time''' ) ) _A = torch.tensor(self.get_from_offsets(__A , '''end_time''' ) ) # fmt: off _A = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) _A = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) ) self.assertTrue(torch.allclose(__A , __A , atol=0.01 ) )
75
0
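A minimal sketch of the pool ordering the comments above insist on: the Pool must be created after the processor so that forked workers inherit the loaded language model. It uses the hf-internal-testing checkpoint from the tests and random logits with the matching vocabulary size of 16; it requires pyctcdecode, and "fork" is POSIX-only.

import numpy as np
from multiprocessing import get_context
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)        # (batch, time, vocab) dummy logits
with get_context("fork").Pool() as pool:  # created *after* the processor
    decoded = processor.batch_decode(logits, pool)
print(decoded.text)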
'''simple docstring''' import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase : '''simple docstring''' def __init__( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str=13 , lowerCAmelCase_ : Any=30 , lowerCAmelCase_ : List[str]=2 , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : str=32 , lowerCAmelCase_ : Tuple=5 , lowerCAmelCase_ : List[Any]=4 , lowerCAmelCase_ : Union[str, Any]=37 , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Tuple=10 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Tuple=2 , ) -> Tuple: '''simple docstring''' A__ : Optional[Any] =parent A__ : Union[str, Any] =batch_size A__ : List[str] =image_size A__ : int =patch_size A__ : Optional[int] =num_channels A__ : Optional[Any] =is_training A__ : List[str] =use_labels A__ : List[Any] =hidden_size A__ : Union[str, Any] =num_hidden_layers A__ : Any =num_attention_heads A__ : int =intermediate_size A__ : Tuple =hidden_act A__ : Tuple =hidden_dropout_prob A__ : Dict =attention_probs_dropout_prob A__ : List[str] =type_sequence_label_size A__ : Union[str, Any] =initializer_range A__ : Union[str, Any] =scope A__ : List[str] =encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ : Tuple =(image_size // patch_size) ** 2 A__ : int =num_patches + 1 def lowercase__ ( self : List[Any] ) -> str: '''simple docstring''' A__ : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ : str =None if self.use_labels: A__ : str =ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ : List[str] =self.get_config() return config, pixel_values, labels def lowercase__ ( self : int ) -> str: '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str ) -> Dict: '''simple docstring''' A__ : List[Any] =ViTModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ : Optional[Any] =model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def lowercase__ ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str ) -> Optional[Any]: '''simple docstring''' A__ : Union[str, Any] =ViTForMaskedImageModeling(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ : Union[str, Any] =model(lowerCamelCase__ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A__ : Optional[Any] =1 A__ : List[str] =ViTForMaskedImageModeling(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ : Union[str, Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ : Optional[int] =model(lowerCamelCase__ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: '''simple docstring''' A__ : Optional[int] =self.type_sequence_label_size A__ : Union[str, Any] =ViTForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ : Any =model(lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ : int =1 A__ : str =ViTForImageClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() A__ : Optional[Any] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ : List[Any] =model(lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowercase__ ( self : str ) -> Optional[Any]: '''simple docstring''' A__ : Optional[Any] =self.prepare_config_and_inputs() ( A__ ) : Any =config_and_inputs A__ : str ={'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): '''simple docstring''' __snake_case = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) __snake_case = ( {"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification} if is_torch_available() else {} ) __snake_case = True __snake_case = False __snake_case = False __snake_case = False def lowercase__ ( self : List[Any] ) -> int: '''simple docstring''' A__ : List[str] =ViTModelTester(self ) A__ : List[str] =ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 ) def lowercase__ ( self : int ) -> int: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def lowercase__ ( self : int ) -> Union[str, Any]: '''simple docstring''' pass def lowercase__ ( self : int ) -> Tuple: '''simple docstring''' A__ : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : int =model_class(lowerCamelCase__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ : Union[str, Any] =model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' A__ : Any =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ : List[str] =model_class(lowerCamelCase__ ) A__ : List[Any] 
=inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ : Optional[Any] =[*signature.parameters.keys()] A__ : str =['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCamelCase__ ) def lowercase__ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' A__ : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def lowercase__ ( self : Tuple ) -> Any: '''simple docstring''' A__ : Dict =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ ) def lowercase__ ( self : Any ) -> List[Any]: '''simple docstring''' A__ : List[str] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ ) @slow def lowercase__ ( self : List[Any] ) -> List[str]: '''simple docstring''' for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : Union[str, Any] =ViTModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) def __lowerCamelCase ( ) -> Union[str, Any]: """simple docstring""" A__ : int =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def lowercase__ ( self : Tuple ) -> Any: '''simple docstring''' return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def lowercase__ ( self : Dict ) -> List[Any]: '''simple docstring''' A__ : int =ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowerCamelCase__ ) A__ : Optional[int] =self.default_image_processor A__ : List[str] =prepare_img() A__ : Optional[int] =image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ ) # forward pass with torch.no_grad(): A__ : int =model(**lowerCamelCase__ ) # verify the logits A__ : Any =torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , lowerCamelCase__ ) A__ : Optional[Any] =torch.tensor([-0.2744, 0.8215, -0.0836] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) ) @slow def lowercase__ ( self : List[str] ) -> Tuple: '''simple docstring''' A__ : int =ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(lowerCamelCase__ ) A__ : Optional[Any] =ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 ) A__ : Optional[Any] =prepare_img() A__ : Any =image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ) A__ : int =inputs.pixel_values.to(lowerCamelCase__ ) # forward pass with torch.no_grad(): A__ : Any =model(lowerCamelCase__ , interpolate_pos_encoding=lowerCamelCase__ ) # verify the logits A__ : str =torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ ) A__ : Union[str, Any] =torch.tensor( [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(lowerCamelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def lowercase__ ( self : Dict ) -> Dict: '''simple docstring''' A__ : int =ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" ) A__ : Optional[int] =self.default_image_processor A__ : Dict =prepare_img() A__ : List[Any] 
=image_processor(images=lowerCamelCase__ , return_tensors="""pt""" )
        A__ : List[str] = inputs.pixel_values.to(lowerCamelCase__ )

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            A__ : List[Any] = model(lowerCamelCase__ )
134
A_ :str = '''0.21.0'''

from .accelerator import Accelerator
from .big_modeling import (
    cpu_offload,
    cpu_offload_with_hook,
    disk_offload,
    dispatch_model,
    init_empty_weights,
    init_on_device,
    load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
    DeepSpeedPlugin,
    DistributedDataParallelKwargs,
    DistributedType,
    FullyShardedDataParallelPlugin,
    GradScalerKwargs,
    InitProcessGroupKwargs,
    find_executable_batch_size,
    infer_auto_device_map,
    is_rich_available,
    load_checkpoint_in_model,
    synchronize_rng_states,
)

if is_rich_available():
    from .utils import rich
71
0
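The `interpolate_pos_encoding` check in the ViT integration test earlier in this row expects a hidden-state shape of (1, 3601, 384); the 3601 is not arbitrary. A quick back-of-the-envelope sketch (plain Python, all names illustrative):

# DINO ViT-S/8 processes a 480x480 image as 8x8 patches plus one [CLS] token.
image_size, patch_size, hidden_size = 480, 8, 384
num_patches = (image_size // patch_size) ** 2  # 60 * 60 = 3600 patches
seq_len = num_patches + 1                      # +1 for the [CLS] token
print((1, seq_len, hidden_size))               # (1, 3601, 384), matching the test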
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer A_ : Any = logging.get_logger(__name__) A_ : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} A_ : Dict = { 'vocab_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt' ), 'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt', 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json', 'distilbert-base-uncased-distilled-squad': ( 'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json', 'distilbert-base-cased-distilled-squad': ( 'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json' ), 'distilbert-base-german-cased': ( 'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json' ), 'distilbert-base-multilingual-cased': ( 'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json' ), }, } A_ : Any = { 'distilbert-base-uncased': 512, 'distilbert-base-uncased-distilled-squad': 512, 'distilbert-base-cased': 512, 'distilbert-base-cased-distilled-squad': 512, 'distilbert-base-german-cased': 512, 'distilbert-base-multilingual-cased': 512, } A_ : int = { 'distilbert-base-uncased': {'do_lower_case': True}, 'distilbert-base-uncased-distilled-squad': {'do_lower_case': True}, 'distilbert-base-cased': {'do_lower_case': False}, 'distilbert-base-cased-distilled-squad': {'do_lower_case': False}, 'distilbert-base-german-cased': {'do_lower_case': False}, 'distilbert-base-multilingual-cased': {'do_lower_case': False}, } class _a (__magic_name__ ): '''simple docstring''' UpperCAmelCase__: Union[str, Any] = VOCAB_FILES_NAMES UpperCAmelCase__: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase__: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase__: Any = PRETRAINED_INIT_CONFIGURATION UpperCAmelCase__: List[str] = ['''input_ids''', '''attention_mask'''] UpperCAmelCase__: List[str] = DistilBertTokenizer def __init__( self , A__=None , A__=None , A__=True , A__="[UNK]" , A__="[SEP]" , A__="[PAD]" , A__="[CLS]" , A__="[MASK]" , A__=True , A__=None , **A__ , ): super().__init__( A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , ) A__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" , A__ ) != do_lower_case or normalizer_state.get("""strip_accents""" , A__ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , A__ ) != tokenize_chinese_chars ): A__ : Any = 
getattr(A__ , normalizer_state.pop("""type""" ) )
            A__ : Optional[Any] = do_lower_case
            A__ : List[Any] = strip_accents
            A__ : Union[str, Any] = tokenize_chinese_chars
            A__ : Union[str, Any] = normalizer_class(**A__ )
        A__ : Tuple = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
141
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class _a : '''simple docstring''' def __init__( self , A__ , ): A__ : Any = parent A__ : Any = 13 A__ : Optional[Any] = 7 A__ : Union[str, Any] = 30 A__ : str = self.seq_length + self.mem_len A__ : Dict = 15 A__ : int = True A__ : Tuple = True A__ : Union[str, Any] = 99 A__ : Optional[Any] = [10, 50, 80] A__ : str = 32 A__ : Tuple = 32 A__ : Union[str, Any] = 4 A__ : Optional[Any] = 8 A__ : int = 128 A__ : List[Any] = 2 A__ : List[str] = 2 A__ : int = None A__ : List[str] = 1 A__ : Union[str, Any] = 0 A__ : List[str] = 3 A__ : int = self.vocab_size - 1 A__ : Optional[Any] = 0.0_1 def __A ( self ): A__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Optional[Any] = None if self.use_labels: A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ : Any = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def __A ( self ): random.seed(self.seed ) tf.random.set_seed(self.seed ) def __A ( self , A__ , A__ , A__ , A__ ): A__ : Dict = TFTransfoXLModel(A__ ) A__ , A__ : Tuple = model(A__ ).to_tuple() A__ : List[str] = {"""input_ids""": input_ids_a, """mems""": mems_a} A__ , A__ : str = model(A__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __A ( self , A__ , A__ , A__ , A__ ): A__ : Optional[int] = TFTransfoXLLMHeadModel(A__ ) A__ , A__ : int = model(A__ ).to_tuple() A__ : int = {"""input_ids""": input_ids_a, """labels""": lm_labels} A__ , A__ : Optional[Any] = model(A__ ).to_tuple() A__ , A__ : Union[str, Any] = model([input_ids_a, mems_a] ).to_tuple() A__ : Any = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} A__ , A__ : Tuple = model(A__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def __A ( self , A__ , A__ , A__ , A__ ): A__ : Any = TFTransfoXLForSequenceClassification(A__ ) A__ : Optional[Any] = model(A__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self ): A__ : Optional[Any] = self.prepare_config_and_inputs() ((A__) , (A__) , (A__) , (A__)) : List[Any] = config_and_inputs A__ : int = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class _a (__magic_name__ , __magic_name__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__: List[Any] = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) UpperCAmelCase__: Optional[Any] = () if is_tf_available() else () UpperCAmelCase__: int = ( { '''feature-extraction''': TFTransfoXLModel, '''text-classification''': TFTransfoXLForSequenceClassification, '''text-generation''': TFTransfoXLLMHeadModel, '''zero-shot''': TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented UpperCAmelCase__: Optional[int] = False UpperCAmelCase__: Optional[int] = False UpperCAmelCase__: Tuple = False UpperCAmelCase__: List[str] = False def __A ( self , A__ , A__ , A__ , A__ , A__ ): if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def __A ( self ): A__ : Tuple = TFTransfoXLModelTester(self ) A__ : List[Any] = ConfigTester(self , config_class=A__ , d_embed=37 ) def __A ( self ): self.config_tester.run_common_tests() def __A ( self ): self.model_tester.set_seed() A__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*A__ ) def __A ( self ): self.model_tester.set_seed() A__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*A__ ) def __A ( self ): A__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A__ ) def __A ( self ): A__ , A__ : int = self.model_tester.prepare_config_and_inputs_for_common() A__ : Union[str, Any] = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: A__ : Any = model_class(A__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: A__ : Optional[Any] = model.get_output_embeddings() assert isinstance(A__ , tf.keras.layers.Layer ) A__ : Tuple = model.get_bias() assert name is None else: A__ : Dict = model.get_output_embeddings() assert x is None A__ : int = model.get_bias() assert name is None def __A ( self ): # TODO JP: Make TransfoXL XLA compliant pass @slow def __A ( self ): for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ : List[Any] = TFTransfoXLModel.from_pretrained(A__ ) self.assertIsNotNone(A__ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def __A ( self ): pass @require_tf class _a (unittest.TestCase ): '''simple docstring''' @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def __A ( self ): A__ : List[Any] = 
TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off A__ : Tuple = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off A__ : Dict = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> A__ : Any = model.generate(A__ , max_length=200 , do_sample=A__ ) self.assertListEqual(output_ids[0].numpy().tolist() , A__ )
141
1
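The two tokenizer helpers in the DistilBERT row above implement BERT-style packing: `[CLS] A [SEP]` for a single sequence, `[CLS] A [SEP] B [SEP]` for a pair, with segment id 0 for the first block and 1 for the second. A toy illustration with made-up ids (101/102 stand in for the real [CLS]/[SEP] vocab ids):

# Hypothetical ids for illustration only; real values come from the vocab.
cls_id, sep_id = 101, 102
seq_a, seq_b = [7, 8, 9], [4, 5]

input_ids = [cls_id] + seq_a + [sep_id] + seq_b + [sep_id]
token_type_ids = [0] * (len(seq_a) + 2) + [1] * (len(seq_b) + 1)

print(input_ids)       # [101, 7, 8, 9, 102, 4, 5, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]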
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient UpperCAmelCase_ = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN']) def lowerCamelCase__ ( A__ : Union[str, Any] ): '''simple docstring''' __lowerCamelCase = test_results.split(""" """ ) __lowerCamelCase = 0 __lowerCamelCase = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. __lowerCamelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1] for i, expression in enumerate(A__ ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def lowerCamelCase__ ( A__ : Optional[int] ): '''simple docstring''' __lowerCamelCase = {} __lowerCamelCase = None __lowerCamelCase = False for line in failures_short_lines.split("""\n""" ): if re.search(R"""_ \[doctest\]""" , A__ ): __lowerCamelCase = True __lowerCamelCase = line.split(""" """ )[2] elif in_error and not line.split(""" """ )[0].isdigit(): __lowerCamelCase = line __lowerCamelCase = False return failures class lowerCamelCase__: def __init__( self: int , UpperCamelCase_: str , UpperCamelCase_: Dict ): __lowerCamelCase = title __lowerCamelCase = doc_test_results["""time_spent"""].split(""",""" )[0] __lowerCamelCase = doc_test_results["""success"""] __lowerCamelCase = doc_test_results["""failures"""] __lowerCamelCase = self.n_success + self.n_failures # Failures and success of the modeling tests __lowerCamelCase = doc_test_results @property def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = [self._time_spent] __lowerCamelCase = 0 for time in time_spent: __lowerCamelCase = time.split(""":""" ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(UpperCamelCase_ ) == 1: __lowerCamelCase = [0, 0, time_parts[0]] __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return F'{int(UpperCamelCase_ )}h{int(UpperCamelCase_ )}m{int(UpperCamelCase_ )}s' @property def lowerCAmelCase__ ( self: List[str] ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def lowerCAmelCase__ ( self: Union[str, Any] ): return { "type": "section", "text": { "type": "plain_text", "text": F'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def lowerCAmelCase__ ( self: Optional[int] ): return { "type": "section", "text": { "type": "plain_text", "text": ( F'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in' F' {self.time}.' 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } @property def lowerCAmelCase__ ( self: str ): __lowerCamelCase = 40 __lowerCamelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(UpperCamelCase_ , UpperCamelCase_ )} __lowerCamelCase = """""" for category, failures in category_failures.items(): if len(UpperCamelCase_ ) == 0: continue if report != "": report += "\n\n" report += F'*{category} failures*:'.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(UpperCamelCase_ ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F'The following examples had failures:\n\n\n{report}\n', }, } @property def lowerCAmelCase__ ( self: List[str] ): __lowerCamelCase = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(UpperCamelCase_ ) @staticmethod def lowerCAmelCase__ ( ): __lowerCamelCase = [ { """type""": """section""", """text""": { """type""": """plain_text""", """text""": """There was an issue running the tests.""", }, """accessory""": { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True}, """url""": F'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}', }, } ] print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(UpperCamelCase_ )} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: int ): print("""Sending the following payload""" ) print(json.dumps({"""blocks""": json.loads(self.payload )} ) ) __lowerCamelCase = F'{self.n_failures} failures out of {self.n_tests} tests,' if self.n_failures else """All tests passed.""" __lowerCamelCase = client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=UpperCamelCase_ , ) def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Tuple ): __lowerCamelCase = """""" for key, value in failures.items(): __lowerCamelCase = value[:2_00] + """ [Truncated]""" if len(UpperCamelCase_ ) > 2_50 else value failures_text += F'*{key}*\n_{value}_\n\n' __lowerCamelCase = job_name __lowerCamelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}} if job_link is not None: __lowerCamelCase = { """type""": """button""", """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True}, """url""": job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def lowerCAmelCase__ ( self: List[Any] ): if self.thread_ts is None: raise ValueError("""Can only post reply if a post has been made.""" ) __lowerCamelCase = self.doc_test_results.pop("""job_link""" ) self.doc_test_results.pop("""failures""" ) self.doc_test_results.pop("""success""" ) self.doc_test_results.pop("""time_spent""" ) __lowerCamelCase = sorted(self.doc_test_results.items() , 
key=lambda UpperCamelCase_ : t[0] ) for job, job_result in sorted_dict: if len(job_result["""failures"""] ): __lowerCamelCase = F'*Num failures* :{len(job_result["failed"] )} \n' __lowerCamelCase = job_result["""failures"""] __lowerCamelCase = self.get_reply_blocks(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , text=UpperCamelCase_ ) print("""Sending the following reply""" ) print(json.dumps({"""blocks""": blocks} ) ) client.chat_postMessage( channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'Results for {job}' , blocks=UpperCamelCase_ , thread_ts=self.thread_ts["""ts"""] , ) time.sleep(1 ) def lowerCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = os.environ["""GITHUB_RUN_ID"""] __lowerCamelCase = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100' __lowerCamelCase = requests.get(A__ ).json() __lowerCamelCase = {} try: jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) __lowerCamelCase = math.ceil((result["""total_count"""] - 100) / 100 ) for i in range(A__ ): __lowerCamelCase = requests.get(url + f'&page={i + 2}' ).json() jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} ) return jobs except Exception as e: print("""Unknown error, could not fetch links.""" , A__ ) return {} def lowerCamelCase__ ( A__ : str ): '''simple docstring''' __lowerCamelCase = {} if os.path.exists(A__ ): __lowerCamelCase = os.listdir(A__ ) for file in files: try: with open(os.path.join(A__ , A__ ) , encoding="""utf-8""" ) as f: __lowerCamelCase = f.read() except UnicodeDecodeError as e: raise ValueError(f'Could not open {os.path.join(A__ , A__ )}.' ) from e return _artifact def lowerCamelCase__ ( ): '''simple docstring''' class lowerCamelCase__: def __init__( self: Optional[Any] , UpperCamelCase_: str ): __lowerCamelCase = name __lowerCamelCase = [] def __str__( self: List[str] ): return self.name def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: str ): self.paths.append({"""name""": self.name, """path""": path} ) __lowerCamelCase = {} __lowerCamelCase = filter(os.path.isdir , os.listdir() ) for directory in directories: __lowerCamelCase = directory if artifact_name not in _available_artifacts: __lowerCamelCase = Artifact(A__ ) _available_artifacts[artifact_name].add_path(A__ ) return _available_artifacts if __name__ == "__main__": UpperCAmelCase_ = get_job_links() UpperCAmelCase_ = retrieve_available_artifacts() UpperCAmelCase_ = collections.OrderedDict( [ ('*.py', 'API Examples'), ('*.md', 'MD Examples'), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' UpperCAmelCase_ = { v: { 'failed': [], 'failures': {}, } for v in docs.values() } # Link to the GitHub Action job UpperCAmelCase_ = github_actions_job_links.get('run_doctests') UpperCAmelCase_ = available_artifacts['doc_tests_gpu_test_reports'].paths[0] UpperCAmelCase_ = retrieve_artifact(artifact_path['name']) if "stats" in artifact: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = handle_test_results(artifact['stats']) UpperCAmelCase_ = failed UpperCAmelCase_ = success UpperCAmelCase_ = time_spent[1:-1] + ', ' UpperCAmelCase_ = extract_first_line_failure(artifact['failures_short']) for line in artifact["summary_short"].split('\n'): if re.search('FAILED', line): UpperCAmelCase_ = line.replace('FAILED ', '') UpperCAmelCase_ = line.split()[0].replace('\n', '') if "::" in line: UpperCAmelCase_ , 
UpperCAmelCase_ = line.split('::') else: UpperCAmelCase_ , UpperCAmelCase_ = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): UpperCAmelCase_ = docs[file_regex] doc_test_results[category]["failed"].append(test) UpperCAmelCase_ = all_failures[test] if test in all_failures else 'N/A' UpperCAmelCase_ = failure break UpperCAmelCase_ = Message('🤗 Results of the doc tests.', doc_test_results) message.post() message.post_reply()
12
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __lowerCAmelCase : Optional[int] =logging.get_logger(__name__) __lowerCAmelCase : Optional[Any] ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : List[str] ={ 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } __lowerCAmelCase : Optional[int] ={ 'gpt-neox-20b': 2_0_4_8, } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''input_ids''', '''attention_mask'''] def __init__( self :int , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :str="<|endoftext|>" , lowerCAmelCase__ :str="<|endoftext|>" , lowerCAmelCase__ :Dict="<|endoftext|>" , lowerCAmelCase__ :Union[str, Any]=False , **lowerCAmelCase__ :List[str] , ) -> Any: super().__init__( lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , lowerCAmelCase__ ) != add_prefix_space: __SCREAMING_SNAKE_CASE : List[str] = getattr(lowerCAmelCase__ , pre_tok_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE : str = add_prefix_space __SCREAMING_SNAKE_CASE : Any = pre_tok_class(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: __SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ ) def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :"Conversation" ) -> List[int]: __SCREAMING_SNAKE_CASE : Optional[Any] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) + [self.eos_token_id] ) if len(lowerCAmelCase__ ) > self.model_max_length: __SCREAMING_SNAKE_CASE : List[str] = input_ids[-self.model_max_length :] return input_ids
9
0
"""simple docstring""" import pandas as pd from matplotlib import pyplot as plt from sklearn.linear_model import LinearRegression # Splitting the dataset into the Training set and Test set from sklearn.model_selection import train_test_split # Fitting Polynomial Regression to the dataset from sklearn.preprocessing import PolynomialFeatures # Importing the dataset __A = pd.read_csv( """https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/""" """position_salaries.csv""" ) __A = dataset.iloc[:, 1:2].values __A = dataset.iloc[:, 2].values __A , __A , __A , __A = train_test_split(X, y, test_size=0.2, random_state=0) __A = PolynomialFeatures(degree=4) __A = poly_reg.fit_transform(X) __A = LinearRegression() pol_reg.fit(X_poly, y) def __A () ->Any: """simple docstring""" plt.scatter(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , color='red' ) plt.plot(_SCREAMING_SNAKE_CASE , pol_reg.predict(poly_reg.fit_transform(_SCREAMING_SNAKE_CASE ) ) , color='blue' ) plt.title('Truth or Bluff (Linear Regression)' ) plt.xlabel('Position level' ) plt.ylabel('Salary' ) plt.show() if __name__ == "__main__": viz_polymonial() # Predicting a new result with Polymonial Regression pol_reg.predict(poly_reg.fit_transform([[5.5]])) # output should be 132148.43750003
254
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase = "▁" , __UpperCAmelCase = True , __UpperCAmelCase = "<unk>" , __UpperCAmelCase = "</s>" , __UpperCAmelCase = "<pad>" , ): '''simple docstring''' lowerCAmelCase__ :Tuple = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } lowerCAmelCase__ :Optional[int] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowerCAmelCase__ :Any = token_dict['token'] lowerCAmelCase__ :int = Tokenizer(Unigram() ) lowerCAmelCase__ :Tuple = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}' ) , ' ' ), normalizers.Lowercase(), ] ) lowerCAmelCase__ :Any = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ), pre_tokenizers.Digits(individual_digits=__UpperCAmelCase ), pre_tokenizers.Punctuation(), ] ) lowerCAmelCase__ :List[str] = decoders.Metaspace(replacement=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase ) lowerCAmelCase__ :Tuple = TemplateProcessing( single=F"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , ) lowerCAmelCase__ :Optional[int] = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(__UpperCAmelCase , __UpperCAmelCase ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ): '''simple docstring''' lowerCAmelCase__ :int = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ :int = [files] self._tokenizer.train(__UpperCAmelCase , trainer=__UpperCAmelCase ) self.add_unk_id() def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase = 8_0_0_0 , __UpperCAmelCase = True , ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = trainers.UnigramTrainer( vocab_size=__UpperCAmelCase , special_tokens=self.special_tokens_list , show_progress=__UpperCAmelCase , ) self._tokenizer.train_from_iterator(__UpperCAmelCase , trainer=__UpperCAmelCase ) self.add_unk_id() def snake_case ( self ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = json.loads(self._tokenizer.to_str() ) lowerCAmelCase__ :List[str] = self.special_tokens['unk']['id'] lowerCAmelCase__ :Union[str, Any] = Tokenizer.from_str(json.dumps(__UpperCAmelCase ) )
254
1
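As a sanity check on the PolynomialFeatures-plus-LinearRegression pipeline used in the salary example above, here is a self-contained sketch on synthetic data; every name is illustrative, and it assumes only numpy and scikit-learn:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

# Synthetic data from a known cubic so we can check the fit recovers it.
rng = np.random.default_rng(0)
X_demo = np.linspace(-3, 3, 50).reshape(-1, 1)
y_demo = 2 * X_demo[:, 0] ** 3 - X_demo[:, 0] + rng.normal(0, 0.1, 50)

poly = PolynomialFeatures(degree=3)
reg = LinearRegression().fit(poly.fit_transform(X_demo), y_demo)

# Prediction at a new point should be close to 2*2^3 - 2 = 14.
print(reg.predict(poly.fit_transform([[2.0]])))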
'''simple docstring''' # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def _a( UpperCamelCase__ : int=None ): '''simple docstring''' if subparsers is not None: SCREAMING_SNAKE_CASE__ : Union[str, Any] =subparsers.add_parser('''env''' ) else: SCREAMING_SNAKE_CASE__ : str =argparse.ArgumentParser('''Accelerate env command''' ) parser.add_argument( '''--config_file''', default=__a, help='''The config file to use for the default values in the launching script.''' ) if subparsers is not None: parser.set_defaults(func=__a ) return parser def _a( UpperCamelCase__ : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int =torch.__version__ SCREAMING_SNAKE_CASE__ : List[Any] =torch.cuda.is_available() SCREAMING_SNAKE_CASE__ : Optional[int] =is_xpu_available() SCREAMING_SNAKE_CASE__ : Optional[int] =is_npu_available() SCREAMING_SNAKE_CASE__ : Dict ='''Not found''' # Get the default from the config file. if args.config_file is not None or os.path.isfile(__a ): SCREAMING_SNAKE_CASE__ : Dict =load_config_from_file(args.config_file ).to_dict() SCREAMING_SNAKE_CASE__ : List[Any] ={ '''`Accelerate` version''': version, '''Platform''': platform.platform(), '''Python version''': platform.python_version(), '''Numpy version''': np.__version__, '''PyTorch version (GPU?)''': f"{pt_version} ({pt_cuda_available})", '''PyTorch XPU available''': str(__a ), '''PyTorch NPU available''': str(__a ), '''System RAM''': f"{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB", } if pt_cuda_available: SCREAMING_SNAKE_CASE__ : Any =torch.cuda.get_device_name() print('''\nCopy-and-paste the text below in your GitHub issue\n''' ) print('''\n'''.join([f"- {prop}: {val}" for prop, val in info.items()] ) ) print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' ) SCREAMING_SNAKE_CASE__ : int =( '''\n'''.join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] ) if isinstance(__a, __a ) else f"\t{accelerate_config}" ) print(__a ) SCREAMING_SNAKE_CASE__ : int =accelerate_config return info def _a( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] =env_command_parser() SCREAMING_SNAKE_CASE__ : Optional[int] =parser.parse_args() env_command(__a ) return 0 if __name__ == "__main__": raise SystemExit(main())
152
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class __A( nn.Module ): """simple docstring""" def __init__(self , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 88 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "geglu" , SCREAMING_SNAKE_CASE_ = None , ): super().__init__() UpperCamelCase__ = nn.ModuleList( [ TransformeraDModel( num_attention_heads=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , cross_attention_dim=SCREAMING_SNAKE_CASE_ , attention_bias=SCREAMING_SNAKE_CASE_ , sample_size=SCREAMING_SNAKE_CASE_ , num_vector_embeds=SCREAMING_SNAKE_CASE_ , activation_fn=SCREAMING_SNAKE_CASE_ , num_embeds_ada_norm=SCREAMING_SNAKE_CASE_ , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference UpperCamelCase__ = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` UpperCamelCase__ = [77, 2_57] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` UpperCamelCase__ = [1, 0] def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ): UpperCamelCase__ = hidden_states UpperCamelCase__ = [] UpperCamelCase__ = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens UpperCamelCase__ = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] UpperCamelCase__ = self.transformer_index_for_condition[i] UpperCamelCase__ = self.transformers[transformer_index]( SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , cross_attention_kwargs=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] UpperCamelCase__ = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) UpperCamelCase__ = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=SCREAMING_SNAKE_CASE_ )
244
0
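The forward pass of the dual-transformer block above combines the two branches' residuals with a fixed mix ratio before re-adding the input. A minimal standalone sketch of just that blend, with hypothetical tensor sizes and no diffusers dependency:

import torch

mix_ratio = 0.5
input_states = torch.randn(2, 77, 64)       # (batch, tokens, features), made-up sizes
encoded_a = torch.randn_like(input_states)  # residual from the first transformer
encoded_b = torch.randn_like(input_states)  # residual from the second transformer

# Blend the two residuals, then restore the skip connection.
output_states = encoded_a * mix_ratio + encoded_b * (1 - mix_ratio)
output_states = output_states + input_states
print(output_states.shape)  # torch.Size([2, 77, 64])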
"""simple docstring""" import qiskit def __A ( a_ :int , a_ :int) -> qiskit.result.counts.Counts: __a : Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''') # Create a Quantum Circuit acting on the q register __a : Optional[Any] = qiskit.QuantumCircuit(a_ , a_) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0) circuit.x(1) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1]) # Execute the circuit on the qasm simulator __a : Any = qiskit.execute(a_ , a_ , shots=10_00) # Return the histogram data of the results of the experiment. return job.result().get_counts(a_) if __name__ == "__main__": A = single_qubit_measure(2, 2) print(F'Total count for various states are: {counts}')
357
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class __lowercase ( _UpperCamelCase ): '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=1 , ): __a : Dict = parent __a : str = batch_size __a : Union[str, Any] = seq_length __a : Any = is_training __a : int = use_input_mask __a : Optional[int] = use_token_type_ids __a : int = use_labels __a : int = vocab_size __a : int = hidden_size __a : str = num_hidden_layers __a : str = num_attention_heads __a : Any = intermediate_size __a : Union[str, Any] = hidden_act __a : Optional[int] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : int = max_position_embeddings __a : Union[str, Any] = type_vocab_size __a : List[str] = type_sequence_label_size __a : List[str] = initializer_range __a : Optional[int] = num_labels __a : List[str] = num_choices __a : int = scope __a : Union[str, Any] = q_groups __a : Dict = k_groups __a : List[str] = v_groups __a : Any = post_attention_groups __a : Optional[int] = intermediate_groups __a : List[str] = output_groups def _lowerCamelCase ( self ): __a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a : Optional[Any] = None if self.use_input_mask: __a : int = random_attention_mask([self.batch_size, self.seq_length] ) __a : List[str] = None __a : Union[str, Any] = None __a : int = None if self.use_labels: __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __a : Dict = ids_tensor([self.batch_size] , self.num_choices ) __a : int = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self ): return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , 
post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Dict = SqueezeBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : Optional[Any] = model(_UpperCAmelCase , _UpperCAmelCase ) __a : Dict = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Optional[Any] = SqueezeBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : int = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : Optional[int] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Any = self.num_labels __a : List[Any] = SqueezeBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : int = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : List[Any] = self.num_labels __a : List[str] = SqueezeBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : Any = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : List[str] = self.num_choices __a : Union[str, Any] = SqueezeBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __a : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __a : Union[str, Any] = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowerCamelCase ( self ): __a : Any = self.prepare_config_and_inputs() ((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) : Optional[Any] = config_and_inputs __a : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): 
'''simple docstring''' __lowerCAmelCase = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) __lowerCAmelCase = ( { '''feature-extraction''': SqueezeBertModel, '''fill-mask''': SqueezeBertForMaskedLM, '''question-answering''': SqueezeBertForQuestionAnswering, '''text-classification''': SqueezeBertForSequenceClassification, '''token-classification''': SqueezeBertForTokenClassification, '''zero-shot''': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) __lowerCAmelCase = False __lowerCAmelCase = True __lowerCAmelCase = False def _lowerCamelCase ( self ): __a : Union[str, Any] = SqueezeBertModelTester(self ) __a : Dict = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 ) def _lowerCamelCase ( self ): self.config_tester.run_common_tests() def _lowerCamelCase ( self ): __a : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase ) def _lowerCamelCase ( self ): __a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase ) @slow def _lowerCamelCase ( self ): for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __a : Any = SqueezeBertModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_torch class __lowercase ( unittest.TestCase ): '''simple docstring''' @slow def _lowerCamelCase ( self ): __a : int = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) __a : Tuple = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) __a : List[str] = model(_UpperCAmelCase )[0] __a : int = torch.Size((1, 3) ) self.assertEqual(output.shape , _UpperCAmelCase ) __a : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] ) self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-4 ) )
188
0
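The qiskit row above flips both qubits with X gates before measuring, so its histogram collapses to '11'. A companion sketch that prepares an entangled Bell pair instead, assuming the same pre-1.0 qiskit API (`qiskit.Aer`, `qiskit.execute`) as the snippet; both of those calls are carried over from it, not independently verified here:

import qiskit


def bell_state_measure() -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend('aer_simulator')
    circuit = qiskit.QuantumCircuit(2, 2)
    circuit.h(0)      # superposition on qubit 0
    circuit.cx(0, 1)  # entangle qubit 1 with qubit 0
    circuit.measure([0, 1], [0, 1])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    # Expect roughly equal counts for '00' and '11', and none for '01'/'10'.
    print(bell_state_measure())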
'''simple docstring''' import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument A__ : str = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] ) -> int: # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model __lowerCamelCase : Union[str, Any] = list(s_dict.keys() ) for key in keys: __lowerCamelCase : Any = R'.*/layers_(\d+)' __lowerCamelCase : Union[str, Any] = key if re.match(UpperCAmelCase_ , UpperCAmelCase_ ): __lowerCamelCase : Union[str, Any] = re.sub(R'layers_(\d+)' , R'block/\1/layer' , UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = R'(encoder|decoder)\/' if re.match(UpperCAmelCase_ , UpperCAmelCase_ ): __lowerCamelCase : Union[str, Any] = re.match(UpperCAmelCase_ , UpperCAmelCase_ ).groups() if groups[0] == "encoder": __lowerCamelCase : int = re.sub(R'/mlp/' , R'/1/mlp/' , UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , UpperCAmelCase_ ) elif groups[0] == "decoder": __lowerCamelCase : str = re.sub(R'/mlp/' , R'/2/mlp/' , UpperCAmelCase_ ) __lowerCamelCase : Any = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , UpperCAmelCase_ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: __lowerCamelCase : List[Any] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ ) print(F'{key} -> {new_key}' ) __lowerCamelCase : int = s_dict.pop(UpperCAmelCase_ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __lowerCamelCase : Optional[int] = s_dict[ 'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __lowerCamelCase : List[Any] = s_dict[ 'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: __lowerCamelCase : List[Any] = s_dict[key].shape[0] __lowerCamelCase : Optional[int] = s_dict[key] for idx in range(UpperCAmelCase_ ): __lowerCamelCase : str = expert_weihts[idx] print(F'{key} -> {key.replace("expert/" , "nested fstring" )}' ) s_dict.pop(UpperCAmelCase_ ) return s_dict A__ : Union[str, Any] = { """NUM_ENCODER_LAYERS""": """num_layers""", """NUM_DECODER_LAYERS""": """num_decoder_layers""", """NUM_HEADS""": """num_heads""", """HEAD_DIM""": """d_kv""", """EMBED_DIM""": """d_model""", """MLP_DIM""": """d_ff""", """NUM_SELECTED_EXPERTS""": """num_selected_experts""", """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""", """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""", """dense.MlpBlock.activations""": """feed_forward_proj""", } def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ) -> str: # Convert a google style config to the hugging face fromat import regex as re with open(UpperCAmelCase_ , 'r' ) as f: __lowerCamelCase : Optional[int] = f.read() __lowerCamelCase : Optional[Any] = re.findall(R'(.*) = ([0-9.]*)' , UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": __lowerCamelCase : List[str] = float(UpperCAmelCase_ ) if '.' in value else int(UpperCAmelCase_ ) __lowerCamelCase : Optional[int] = re.findall(R'(.*activations) = \(\'(.*)\',\)' , UpperCAmelCase_ )[0] __lowerCamelCase : Tuple = str(activation[1] ) __lowerCamelCase : Any = num_experts __lowerCamelCase : List[Any] = SwitchTransformersConfig(**UpperCAmelCase_ ) return config def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Any="./" , UpperCAmelCase_ : Any=8 ) -> Optional[int]: # Initialise PyTorch model print(F'Loading flax weights from : {flax_checkpoint_path}' ) __lowerCamelCase : List[Any] = checkpoints.load_tax_checkpoint(UpperCAmelCase_ ) if gin_file is not None: __lowerCamelCase : List[str] = convert_gin_to_config(UpperCAmelCase_ , UpperCAmelCase_ ) else: __lowerCamelCase : List[str] = SwitchTransformersConfig.from_pretrained(UpperCAmelCase_ ) __lowerCamelCase : List[str] = SwitchTransformersForConditionalGeneration(UpperCAmelCase_ ) __lowerCamelCase : Union[str, Any] = flax_params['target'] __lowerCamelCase : str = flatten_dict(UpperCAmelCase_ , sep='/' ) __lowerCamelCase : List[str] = rename_keys(UpperCAmelCase_ ) __lowerCamelCase : Optional[Any] = unflatten_dict(UpperCAmelCase_ , sep='/' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(UpperCAmelCase_ , UpperCAmelCase_ ) print(F'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": A__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--switch_t5x_checkpoint_path""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the""" """ model architecture. If not provided, a `gin_file` has to be provided.""" ), ) parser.add_argument( """--gin_file""", default=None, type=str, required=False, help="""Path to the gin config file. 
If not provided, a `config_file` has to be passed """, ) parser.add_argument( """--config_name""", default=None, type=str, required=False, help="""Config name of SwitchTransformers model.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output pytorch model.""" ) parser.add_argument("""--num_experts""", default=8, type=int, required=False, help="""Number of experts""") A__ : List[Any] = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
185
'''simple docstring''' import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @slow def lowercase_ ( self ) -> List[str]: __lowerCamelCase : Any = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) __lowerCamelCase : Tuple = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) model.to(SCREAMING_SNAKE_CASE_ ) from datasets import load_dataset __lowerCamelCase : str = load_dataset('nielsr/rvlcdip-demo' ) __lowerCamelCase : List[Any] = dataset['train'][0]['image'].convert('RGB' ) __lowerCamelCase : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): __lowerCamelCase : str = model(**SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = outputs.logits __lowerCamelCase : List[Any] = torch.Size((1, 16) ) self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[Any] = torch.tensor( [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=SCREAMING_SNAKE_CASE_ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
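# Editor's sketch mirroring the integration test above as a standalone
# inference snippet; the local image path is a hypothetical placeholder,
# while the checkpoint name is the one the test uses.
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")

image = Image.open("document.png").convert("RGB")  # placeholder input document
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 16): RVL-CDIP has 16 classes
print(model.config.id2label[logits.argmax(-1).item()])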
185
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class _A ( unittest.TestCase ): @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __UpperCAmelCase : List[str] = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : List[str] = TFAutoModel.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = AutoModel.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __UpperCAmelCase : int = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : int = AutoModelForPreTraining.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> List[Any]: '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Any = TFAutoModelForCausalLM.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained( __UpperCAmelCase , output_loading_info=__UpperCAmelCase , 
from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : int = AutoModelForCausalLM.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( __UpperCAmelCase , output_loading_info=__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> Dict: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> List[str]: '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = TFAutoModelForMaskedLM.from_pretrained( __UpperCAmelCase , output_loading_info=__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Optional[int] = AutoModelForMaskedLM.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = AutoModelForMaskedLM.from_pretrained( __UpperCAmelCase , output_loading_info=__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> Dict: '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : int = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained( __UpperCAmelCase , output_loading_info=__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained( __UpperCAmelCase , output_loading_info=__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in 
["bert-base-uncased"]: __UpperCAmelCase : Dict = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : List[Any] = TFAutoModelForSequenceClassification.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) __UpperCAmelCase : int = AutoModelForQuestionAnswering.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__UpperCAmelCase ) , 14_410 ) __UpperCAmelCase : str = AutoModelWithLMHead.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__UpperCAmelCase ) , 14_410 ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(__UpperCAmelCase , from_pt=__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__UpperCAmelCase ) , 14_410 ) __UpperCAmelCase : Dict = AutoModelWithLMHead.from_pretrained(__UpperCAmelCase , from_tf=__UpperCAmelCase ) self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__UpperCAmelCase ) , 14_410 )
16
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class _A : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : str = is_training __UpperCAmelCase : Union[str, Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Optional[Any] = use_labels __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : str = num_attention_heads __UpperCAmelCase : Optional[Any] = intermediate_size __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : List[str] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Dict = type_vocab_size __UpperCAmelCase : List[Any] = type_sequence_label_size __UpperCAmelCase : List[Any] = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : str = num_choices __UpperCAmelCase : List[Any] = scope def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Union[str, Any] = None if self.use_labels: __UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Dict = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __A ( self ) -> Optional[Any]: '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = LlamaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[str] = True __UpperCAmelCase : List[str] = LlamaModel(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , ) __UpperCAmelCase : Tuple = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Any = True __UpperCAmelCase : Tuple = LlamaForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() # first forward pass __UpperCAmelCase : Optional[int] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase , ) __UpperCAmelCase : Union[str, Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __UpperCAmelCase : List[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = torch.cat([input_mask, next_mask] , dim=-1 ) __UpperCAmelCase : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , 
)["""hidden_states"""][0] __UpperCAmelCase : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["""hidden_states"""][0] # select random slice __UpperCAmelCase : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach() __UpperCAmelCase : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-3 ) ) def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Any = config_and_inputs __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): _SCREAMING_SNAKE_CASE : Optional[int] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _SCREAMING_SNAKE_CASE : Any = (LlamaForCausalLM,) if is_torch_available() else () _SCREAMING_SNAKE_CASE : List[str] = ( { "feature-extraction": LlamaModel, "text-classification": LlamaForSequenceClassification, "text-generation": LlamaForCausalLM, "zero-shot": LlamaForSequenceClassification, } if is_torch_available() else {} ) _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : List[str] = False def __A ( self ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = LlamaModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __A ( self ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : str = type self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __A ( self ) -> List[str]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = 3 __UpperCAmelCase : Optional[Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> List[Any]: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() 
__UpperCAmelCase : Optional[int] = 3 __UpperCAmelCase : Optional[Any] = """single_label_classification""" __UpperCAmelCase : int = input_dict["""input_ids"""] __UpperCAmelCase : List[Any] = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __UpperCAmelCase : Tuple = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Optional[Any] = 3 __UpperCAmelCase : str = """multi_label_classification""" __UpperCAmelCase : Union[str, Any] = input_dict["""input_ids"""] __UpperCAmelCase : int = input_ids.ne(1 ).to(__UpperCAmelCase ) __UpperCAmelCase : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __UpperCAmelCase : Dict = LlamaForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() __UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __A ( self ) -> Dict: '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __A ( self , __UpperCAmelCase ) -> Tuple: '''simple docstring''' __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : List[Any] = ids_tensor([1, 10] , config.vocab_size ) __UpperCAmelCase : str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) original_model.to(__UpperCAmelCase ) original_model.eval() __UpperCAmelCase : int = original_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = original_model(__UpperCAmelCase ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights __UpperCAmelCase : Dict = {"""type""": scaling_type, """factor""": 10.0} __UpperCAmelCase : Optional[Any] = LlamaModel(__UpperCAmelCase ) scaled_model.to(__UpperCAmelCase ) scaled_model.eval() __UpperCAmelCase : Optional[Any] = scaled_model(__UpperCAmelCase ).last_hidden_state __UpperCAmelCase : List[str] = scaled_model(__UpperCAmelCase ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) else: self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-5 ) ) @require_torch class _A ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" ) @slow def __A ( self ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[int] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) __UpperCAmelCase : int = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[Any] = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" ) @slow def __A ( self ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) __UpperCAmelCase : str = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" ) @slow def __A ( self ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : Union[str, Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) __UpperCAmelCase : Union[str, Any] = model(torch.tensor(__UpperCAmelCase ) ) # Expected mean on dim = -1 __UpperCAmelCase : Dict = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # slicing logits[0, 0, 0:30] # fmt: off __UpperCAmelCase : Any = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, 
-0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __A ( self ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338] __UpperCAmelCase : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) __UpperCAmelCase : List[Any] = model(torch.tensor(__UpperCAmelCase ) ) __UpperCAmelCase : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , __UpperCAmelCase , atol=1E-2 , rtol=1E-2 ) # fmt: off __UpperCAmelCase : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , __UpperCAmelCase , atol=1E-5 , rtol=1E-5 ) @unittest.skip("""Model is currently gated""" ) @slow def __A ( self ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" __UpperCAmelCase : Dict = """Simply put, the theory of relativity states that """ __UpperCAmelCase : int = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) __UpperCAmelCase : int = tokenizer.encode(__UpperCAmelCase , return_tensors="""pt""" ) __UpperCAmelCase : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=__UpperCAmelCase ) # greedy generation outputs __UpperCAmelCase : Tuple = model.generate(__UpperCAmelCase , max_new_tokens=64 , top_p=__UpperCAmelCase , temperature=1 , do_sample=__UpperCAmelCase ) __UpperCAmelCase : Optional[int] = tokenizer.decode(generated_ids[0] , skip_special_tokens=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase )
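# Editor's sketch of the rope_scaling configuration exercised by the
# parameterized scaling test above; the tiny model dimensions are toy
# assumptions, not values taken from the test file.
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99,
    hidden_size=32,
    intermediate_size=37,
    num_hidden_layers=2,
    num_attention_heads=4,
    max_position_embeddings=64,
    rope_scaling={"type": "linear", "factor": 10.0},  # or {"type": "dynamic", "factor": 10.0}
)
model = LlamaModel(config)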
16
1
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __snake_case =get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( __lowercase , unittest.TestCase ): lowerCamelCase : List[str] = AlbertTokenizer lowerCamelCase : Dict = AlbertTokenizerFast lowerCamelCase : Optional[Any] = True lowerCamelCase : Any = True lowerCamelCase : str = True def __UpperCAmelCase ( self : int ) -> int: super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase = AlbertTokenizer(UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[str] ) -> Union[str, Any]: lowerCAmelCase = 'this is a test' lowerCAmelCase = 'this is a test' return input_text, output_text def __UpperCAmelCase ( self : List[str] ) -> List[Any]: lowerCAmelCase = '<pad>' lowerCAmelCase = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__ ) , UpperCAmelCase__ ) def __UpperCAmelCase ( self : Tuple ) -> int: lowerCAmelCase = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<pad>' ) self.assertEqual(vocab_keys[1] , '<unk>' ) self.assertEqual(vocab_keys[-1] , '▁eloquent' ) self.assertEqual(len(UpperCAmelCase__ ) , 3_0_0_0_0 ) def __UpperCAmelCase ( self : str ) -> List[str]: self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 ) def __UpperCAmelCase ( self : Tuple ) -> Union[str, Any]: if not self.test_rust_tokenizer: return lowerCAmelCase = self.get_tokenizer() lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = 'I was born in 92000, and this is falsé.' lowerCAmelCase = tokenizer.tokenize(UpperCAmelCase__ ) lowerCAmelCase = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase = self.get_rust_tokenizer() lowerCAmelCase = tokenizer.encode(UpperCAmelCase__ ) lowerCAmelCase = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def __UpperCAmelCase ( self : str ) -> str: lowerCAmelCase = AlbertTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) lowerCAmelCase = tokenizer.tokenize('This is a test' ) self.assertListEqual(UpperCAmelCase__ , ['▁this', '▁is', '▁a', '▁test'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [4_8, 2_5, 2_1, 1_2_8_9] ) lowerCAmelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( UpperCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] ) lowerCAmelCase = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , [3_1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9] ) lowerCAmelCase = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def __UpperCAmelCase ( self : str ) -> Tuple: lowerCAmelCase = AlbertTokenizer(UpperCAmelCase__ ) lowerCAmelCase = tokenizer.encode('sequence builders' ) lowerCAmelCase = tokenizer.encode('multi-sequence build' ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def __UpperCAmelCase ( self : Dict ) -> int: # fmt: off lowerCAmelCase = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
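# Editor's sketch of the SentencePiece behavior checked above, using the
# public albert-base-v2 checkpoint instead of the local spiece.model fixture.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tokenizer.tokenize("This is a test"))   # ['▁this', '▁is', '▁a', '▁test']
print(tokenizer.encode("sequence builders"))  # ids wrapped as [CLS] ... [SEP]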
4
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _snake_case ( unittest.TestCase ): def __init__( self , a , a=7 , a=3 , a=10 , a=18 , a=30 , a=400 , a=True , a=None , a=True , a=[0.5, 0.5, 0.5] , a=[0.5, 0.5, 0.5] , a=None , ) -> Dict: SCREAMING_SNAKE_CASE = size if size is not None else {'shortest_edge': 18} SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {'height': 18, 'width': 18} SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = num_frames SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = min_resolution SCREAMING_SNAKE_CASE = max_resolution SCREAMING_SNAKE_CASE = do_resize SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = image_mean SCREAMING_SNAKE_CASE = image_std SCREAMING_SNAKE_CASE = crop_size def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _snake_case ( A__ , unittest.TestCase ): _lowercase : List[str] = VivitImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: SCREAMING_SNAKE_CASE = VivitImageProcessingTester(self) @property def SCREAMING_SNAKE_CASE__ ( self) -> int: return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self) -> Any: SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(a , 'image_mean')) self.assertTrue(hasattr(a , 'image_std')) self.assertTrue(hasattr(a , 'do_normalize')) self.assertTrue(hasattr(a , 'do_resize')) self.assertTrue(hasattr(a , 'do_center_crop')) self.assertTrue(hasattr(a , 'size')) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'shortest_edge': 18}) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18}) SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84) self.assertEqual(image_processor.size , {'shortest_edge': 42}) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84}) def SCREAMING_SNAKE_CASE__ ( self) -> str: # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PIL videos SCREAMING_SNAKE_CASE = prepare_video_inputs(self.image_processor_tester , equal_resolution=a) for video in video_inputs: self.assertIsInstance(a , a) self.assertIsInstance(video[0] , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(video_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values 
self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self) -> Tuple: # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors SCREAMING_SNAKE_CASE = prepare_video_inputs(self.image_processor_tester , equal_resolution=a , numpify=a) for video in video_inputs: self.assertIsInstance(a , a) self.assertIsInstance(video[0] , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(video_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]: # Initialize image_processing SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE = prepare_video_inputs(self.image_processor_tester , equal_resolution=a , torchify=a) for video in video_inputs: self.assertIsInstance(a , a) self.assertIsInstance(video[0] , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(video_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(a , return_tensors='pt').pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
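# Editor's sketch of the preprocessing contract tested above: a video is a
# list of frames, and the processor returns a (batch, frames, channels,
# height, width) tensor. The frame sizes here are toy assumptions, and
# return_tensors="pt" requires torch to be installed.
import numpy as np
from transformers import VivitImageProcessor

processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
video = [np.random.randint(0, 256, (3, 30, 40), dtype=np.uint8) for _ in range(10)]  # 10 frames
pixel_values = processor(video, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 10, 3, 18, 18])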
137
0
import numpy as np import datasets _snake_case = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' _snake_case = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' _snake_case = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalanobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase ( datasets.Metric ): def UpperCAmelCase ( self :int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def UpperCAmelCase ( self :str , _lowercase :List[Any] , _lowercase :Any ): '''simple docstring''' lowercase__ = np.array(lowercase_ ) lowercase__ = np.array(lowercase_ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction lowercase__ = X - np.mean(lowercase_ ) lowercase__ = np.cov(reference_distribution.T ) try: lowercase__ = np.linalg.inv(lowercase_ ) except np.linalg.LinAlgError: lowercase__ = np.linalg.pinv(lowercase_ ) lowercase__ = np.dot(lowercase_ , lowercase_ ) lowercase__ = np.dot(lowercase_ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
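# Editor's cross-check (not part of the metric): computing the squared
# Mahalanobis distance directly with numpy reproduces the docstring example.
import numpy as np

X = np.array([[0, 1]])
reference = np.array([[0, 1], [1, 0]])

delta = X - reference.mean(axis=0)
cov = np.cov(reference.T)
inv_cov = np.linalg.pinv(cov)  # pinv, since this toy covariance is singular
mahal = np.einsum("ij,jk,ik->i", delta, inv_cov, delta)
print(mahal)  # [0.5], matching {'mahalanobis': array([0.5])} above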
356
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class lowerCAmelCase ( unittest.TestCase ): __lowerCamelCase = ViTImageProcessor if is_vision_available() else None @property def UpperCAmelCase ( self :Union[str, Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self :Optional[int] ): '''simple docstring''' lowercase__ = (3, 32, 1_28) lowercase__ = tempfile.mkdtemp() # fmt: off lowercase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on lowercase__ = dict(zip(_lowercase , range(len(_lowercase ) ) ) ) lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_lowercase ) + "\n" ) lowercase__ = { "do_normalize": False, "do_resize": True, "image_processor_type": "ViTImageProcessor", "resample": 3, "size": {"height": 32, "width": 1_28}, } lowercase__ = os.path.join(self.tmpdirname , _lowercase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(_lowercase , _lowercase ) def UpperCAmelCase ( self :Optional[Any] , **_lowercase :str ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase ) def UpperCAmelCase ( self :List[Any] , **_lowercase :List[str] ): '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowercase ) def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase ( self :str ): '''simple docstring''' lowercase__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta ) lowercase__ = Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) return image_input def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_image_processor() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) processor.save_pretrained(self.tmpdirname ) lowercase__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowercase ) def UpperCAmelCase ( self :Optional[Any] ): '''simple docstring''' lowercase__ = self.get_tokenizer() lowercase__ = self.get_image_processor() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) processor.save_pretrained(self.tmpdirname ) lowercase__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase__ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 ) lowercase__ = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , 
eos_token="(EOS)" , do_normalize=_lowercase , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _lowercase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowercase ) def UpperCAmelCase ( self :List[Any] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) lowercase__ = self.prepare_image_inputs() lowercase__ = image_processor(_lowercase , return_tensors="np" ) lowercase__ = processor(images=_lowercase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase ( self :Optional[Any] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) lowercase__ = "test" lowercase__ = processor(text=_lowercase ) lowercase__ = tokenizer(_lowercase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase ( self :Optional[int] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) lowercase__ = "test" lowercase__ = self.prepare_image_inputs() lowercase__ = processor(text=_lowercase , images=_lowercase ) self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] ) # test if it raises when no input is passed with pytest.raises(_lowercase ): processor() def UpperCAmelCase ( self :Tuple ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) lowercase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] lowercase__ = processor.char_decode(_lowercase ) lowercase__ = tokenizer.batch_decode(_lowercase ) lowercase__ = [seq.replace(" " , "" ) for seq in decoded_tok] self.assertListEqual(_lowercase , _lowercase ) def UpperCAmelCase ( self :str ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) lowercase__ = None lowercase__ = self.prepare_image_inputs() lowercase__ = processor(text=_lowercase , images=_lowercase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def UpperCAmelCase ( self :List[str] ): '''simple docstring''' lowercase__ = self.get_image_processor() lowercase__ = self.get_tokenizer() lowercase__ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase ) lowercase__ = torch.randn(1 , 27 , 38 ) lowercase__ = torch.randn(1 , 27 , 5_02_57 ) lowercase__ = torch.randn(1 , 27 , 3_05_22 ) lowercase__ = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
201
0
"""simple docstring""" from sklearn.metrics import fa_score import datasets __lowerCAmelCase : Tuple =""" The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ __lowerCAmelCase : Any =""" Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. 
>>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """ __lowerCAmelCase : Dict =""" @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _A ( datasets.Metric ): def A__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=1 , __lowerCAmelCase="binary" , __lowerCAmelCase=None ): """simple docstring""" lowercase = fa_score( __lowerCAmelCase , __lowerCAmelCase , labels=__lowerCAmelCase , pos_label=__lowerCAmelCase , average=__lowerCAmelCase , sample_weight=__lowerCAmelCase ) return {"f1": float(__lowerCAmelCase ) if score.size == 1 else score}
"""simple docstring""" from ..utils import DummyObject, requires_backends class _A ( metaclass=lowerCAmelCase ): snake_case__ : Optional[int] = ['torch', 'torchsde'] def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(self , ["""torch""", """torchsde"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """torchsde"""] ) @classmethod def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ): """simple docstring""" requires_backends(cls , ["""torch""", """torchsde"""] )
'''simple docstring''' import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a : Tuple = logging.get_logger(__name__) a : List[str] = { """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""", """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""", """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""", } class UpperCamelCase_ ( __magic_name__ ): lowercase = 'owlvit_text_model' def __init__( self , A=49408 , A=512 , A=2048 , A=12 , A=8 , A=16 , A="quick_gelu" , A=1e-5 , A=0.0 , A=0.0_2 , A=1.0 , A=0 , A=49406 , A=49407 , **A , ) -> Dict: super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A ) UpperCAmelCase : str = vocab_size UpperCAmelCase : List[Any] = hidden_size UpperCAmelCase : Any = intermediate_size UpperCAmelCase : List[str] = num_hidden_layers UpperCAmelCase : List[Any] = num_attention_heads UpperCAmelCase : Optional[int] = max_position_embeddings UpperCAmelCase : Dict = hidden_act UpperCAmelCase : List[str] = layer_norm_eps UpperCAmelCase : str = attention_dropout UpperCAmelCase : str = initializer_range UpperCAmelCase : Optional[int] = initializer_factor @classmethod def _lowercase( cls , A , **A ) -> "PretrainedConfig": cls._set_token_in_kwargs(A ) UpperCAmelCase , UpperCAmelCase : int = cls.get_config_dict(A , **A ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": UpperCAmelCase : Dict = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A , **A ) class UpperCamelCase_ ( __magic_name__ ): lowercase = 'owlvit_vision_model' def __init__( self , A=768 , A=3072 , A=12 , A=12 , A=3 , A=768 , A=32 , A="quick_gelu" , A=1e-5 , A=0.0 , A=0.0_2 , A=1.0 , **A , ) -> Optional[int]: super().__init__(**A ) UpperCAmelCase : Union[str, Any] = hidden_size UpperCAmelCase : int = intermediate_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Any = num_attention_heads UpperCAmelCase : str = num_channels UpperCAmelCase : Any = image_size UpperCAmelCase : Tuple = patch_size UpperCAmelCase : Dict = hidden_act UpperCAmelCase : Any = layer_norm_eps UpperCAmelCase : int = attention_dropout UpperCAmelCase : Any = initializer_range UpperCAmelCase : List[Any] = initializer_factor @classmethod def _lowercase( cls , A , **A ) -> "PretrainedConfig": cls._set_token_in_kwargs(A ) UpperCAmelCase , UpperCAmelCase : Union[str, Any] = cls.get_config_dict(A , **A ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get("""model_type""" ) == "owlvit": UpperCAmelCase : List[Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A , **A ) class UpperCamelCase_ ( __magic_name__ ): lowercase = 'owlvit' lowercase = True def __init__( self , A=None , A=None , A=512 , A=2.6_5_9_2 , A=True , **A , ) -> int: super().__init__(**A ) if text_config is None: UpperCAmelCase : str = {} logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" ) if vision_config is None: UpperCAmelCase : List[Any] = {} logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" ) UpperCAmelCase : int = OwlViTTextConfig(**A ) UpperCAmelCase : Optional[Any] = OwlViTVisionConfig(**A ) UpperCAmelCase : Union[str, Any] = projection_dim UpperCAmelCase : int = logit_scale_init_value UpperCAmelCase : Optional[Any] = return_dict UpperCAmelCase : Any = 1.0 @classmethod def _lowercase( cls , A , **A ) -> "PretrainedConfig": cls._set_token_in_kwargs(A ) UpperCAmelCase , UpperCAmelCase : List[str] = cls.get_config_dict(A , **A ) if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(A , **A ) @classmethod def _lowercase( cls , A , A , **A ) -> Optional[Any]: UpperCAmelCase : int = {} UpperCAmelCase : int = text_config UpperCAmelCase : Optional[Any] = vision_config return cls.from_dict(A , **A ) def _lowercase( self ) -> Tuple: UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) UpperCAmelCase : List[str] = self.text_config.to_dict() UpperCAmelCase : List[str] = self.vision_config.to_dict() UpperCAmelCase : List[str] = self.__class__.model_type return output class UpperCamelCase_ ( __magic_name__ ): @property def _lowercase( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""attention_mask""", {0: """batch""", 1: """sequence"""}), ] ) @property def _lowercase( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""logits_per_image""", {0: """batch"""}), ("""logits_per_text""", {0: """batch"""}), ("""text_embeds""", {0: """batch"""}), ("""image_embeds""", {0: """batch"""}), ] ) @property def _lowercase( self ) -> float: return 1e-4 def _lowercase( self , A , A = -1 , A = -1 , A = None , ) -> Mapping[str, Any]: UpperCAmelCase : Any = super().generate_dummy_inputs( processor.tokenizer , batch_size=A , seq_length=A , framework=A ) UpperCAmelCase : Optional[int] = super().generate_dummy_inputs( processor.image_processor , batch_size=A , framework=A ) return {**text_input_dict, **image_input_dict} @property def _lowercase( self ) -> int: return 14
import pytest

from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict


@pytest.mark.parametrize(
    "split_dict",
    [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42, dataset_name="my_dataset")}),
        SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)}),
        SplitDict({"train": SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded


@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name

'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __A : def __init__(self : List[Any] , __a : List[Any] , __a : int=3 , __a : Optional[int]=32 , __a : Optional[Any]=3 , __a : List[Any]=10 , __a : str=[10, 20, 30, 40] , __a : Any=[1, 1, 2, 1] , __a : str=True , __a : Optional[Any]=True , __a : Optional[int]="relu" , __a : Optional[Any]=3 , __a : Union[str, Any]=None , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = image_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = embeddings_size UpperCAmelCase_ = hidden_sizes UpperCAmelCase_ = depths UpperCAmelCase_ = is_training UpperCAmelCase_ = use_labels UpperCAmelCase_ = hidden_act UpperCAmelCase_ = num_labels UpperCAmelCase_ = scope UpperCAmelCase_ = len(__a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ = self.get_config() return config, pixel_values, labels def _lowercase (self : int ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _lowercase (self : str , __a : List[Any] , __a : Tuple , __a : Dict ): UpperCAmelCase_ = TFResNetModel(config=__a ) UpperCAmelCase_ = model(__a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowercase (self : List[Any] , __a : Tuple , __a : Dict , __a : Tuple ): UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = TFResNetForImageClassification(__a ) UpperCAmelCase_ = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase (self : Any ): UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class __A ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): a__ : Union[str, Any] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () a__ : List[Any] = ( {"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification} if is_tf_available() else {} ) a__ : int = False a__ : str = False a__ : Optional[Any] = False a__ : List[str] = False a__ : str = False def _lowercase (self : Tuple ): UpperCAmelCase_ = TFResNetModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=__a , 
has_text_modality=__a ) def _lowercase (self : Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowercase (self : Optional[int] ): return @unittest.skip(reason="ResNet does not use inputs_embeds" ) def _lowercase (self : Optional[Any] ): pass @unittest.skip(reason="ResNet does not support input and output embeddings" ) def _lowercase (self : str ): pass def _lowercase (self : Optional[Any] ): UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ = [*signature.parameters.keys()] UpperCAmelCase_ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def _lowercase (self : str ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def _lowercase (self : List[Any] ): def check_hidden_states_output(__a : str , __a : Dict , __a : Tuple ): UpperCAmelCase_ = model_class(__a ) UpperCAmelCase_ = model(**self._prepare_for_class(__a , __a ) ) UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ = self.model_tester.num_stages self.assertEqual(len(__a ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ = layer_type UpperCAmelCase_ = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ = True check_hidden_states_output(__a , __a , __a ) def _lowercase (self : Tuple ): UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def _lowercase (self : List[str] ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ = TFResNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def lowerCAmelCase_ ( ) -> str: '''simple docstring''' UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class __A ( unittest.TestCase ): @cached_property def _lowercase (self : List[str] ): return ( AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _lowercase (self : str ): UpperCAmelCase_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ = self.default_image_processor UpperCAmelCase_ = prepare_img() UpperCAmelCase_ = image_processor(images=__a , return_tensors="tf" ) # forward pass UpperCAmelCase_ = model(**__a ) # verify the logits 
UpperCAmelCase_ = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) UpperCAmelCase_ = tf.constant([-11.10_69, -9.78_77, -8.37_77] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
"""
Project Euler problem 99: find the line number of the base/exponent pair with
the greatest numerical value in base_exp.txt.
"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    largest: float = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())

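# Why the solution above compares x * log10(a) instead of a**x: log10 is
# strictly increasing, so a**x > c**y exactly when x*log10(a) > y*log10(c),
# and the logarithms stay tiny while the powers themselves would run to
# thousands of digits. A quick self-contained check (my own numbers):
from math import log10

assert (11 * log10(2) > 7 * log10(3)) == (2**11 > 3**7)  # 2048 vs 2187
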
"""simple docstring""" import functools from typing import Any def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] ): # Validation if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or len(UpperCAmelCase_ ) == 0: raise ValueError("""the string should be not empty string""" ) if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or not all( isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(UpperCAmelCase_ ) > 0 for item in words ): raise ValueError("""the words should be a list of non-empty strings""" ) # Build trie A__ = {} A__ = """WORD_KEEPER""" for word in words: A__ = trie for c in word: if c not in trie_node: A__ = {} A__ = trie_node[c] A__ = True A__ = len(UpperCAmelCase_ ) # Dynamic programming method @functools.cache def is_breakable(UpperCAmelCase_ : int ) -> bool: if index == len_string: return True A__ = trie for i in range(UpperCAmelCase_ , UpperCAmelCase_ ): A__ = trie_node.get(string[i] , UpperCAmelCase_ ) if trie_node is None: return False if trie_node.get(UpperCAmelCase_ , UpperCAmelCase_ ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a : """simple docstring""" def __init__( self: Optional[int] , UpperCamelCase: List[str] , UpperCamelCase: Dict=13 , UpperCamelCase: Optional[Any]=30 , UpperCamelCase: Optional[Any]=2 , UpperCamelCase: List[str]=3 , UpperCamelCase: Tuple=True , UpperCamelCase: Dict=True , UpperCamelCase: Optional[int]=32 , UpperCamelCase: Tuple=5 , UpperCamelCase: Optional[Any]=4 , UpperCamelCase: Optional[Any]=37 , UpperCamelCase: Optional[Any]="gelu" , UpperCamelCase: Dict=0.1 , UpperCamelCase: Any=0.1 , UpperCamelCase: str=10 , UpperCamelCase: Any=0.02 , UpperCamelCase: List[Any]=None , UpperCamelCase: int=2 , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = is_training A__ = use_labels A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = type_sequence_label_size A__ = initializer_range A__ = scope A__ = encoder_stride # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) A__ = (image_size // patch_size) ** 2 A__ = num_patches + 1 def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self: int ): """simple docstring""" return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase ( self: Tuple , UpperCamelCase: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[int] ): """simple docstring""" A__ = ViTModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Optional[int] , UpperCamelCase: Union[str, Any] ): """simple docstring""" A__ = ViTForMaskedImageModeling(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) 
self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A__ = 1 A__ = ViTForMaskedImageModeling(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase ( self: str , UpperCamelCase: Dict , UpperCamelCase: List[Any] , UpperCamelCase: List[Any] ): """simple docstring""" A__ = self.type_sequence_label_size A__ = ViTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A__ = 1 A__ = ViTForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) UpperCAmelCase = ( {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase ( self: Any ): """simple docstring""" A__ = ViTModelTester(self ) A__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 ) def UpperCamelCase ( self: str ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViT does not use inputs_embeds""" ) def UpperCamelCase ( self: str ): """simple docstring""" pass def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def UpperCamelCase ( self: Dict ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCamelCase ( self: Dict ): """simple docstring""" for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = ViTModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def _snake_case ( ): A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class a ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase ( self: List[Any] ): """simple docstring""" return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None @slow def UpperCamelCase ( self: Dict ): """simple docstring""" A__ = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(UpperCamelCase ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) A__ = torch.tensor([-0.2_744, 0.8_215, -0.0_836] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) ) @slow def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(UpperCamelCase ) A__ = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 ) A__ = prepare_img() A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ) A__ = inputs.pixel_values.to(UpperCamelCase ) # forward pass with torch.no_grad(): A__ = model(UpperCamelCase , interpolate_pos_encoding=UpperCamelCase ) # verify the logits A__ = torch.Size((1, 36_01, 3_84) ) self.assertEqual(outputs.last_hidden_state.shape , UpperCamelCase ) A__ = torch.tensor( [[4.2_340, 4.3_906, -6.6_692], [4.5_463, 1.8_928, -6.7_257], [4.4_429, 0.8_496, -5.8_585]] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ) A__ = inputs.pixel_values.to(UpperCamelCase ) # forward pass to make sure inference works in fp16 with torch.no_grad(): A__ = model(UpperCamelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowercase = { """configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""], """tokenization_ctrl""": ["""CTRLTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """CTRLForSequenceClassification""", """CTRLLMHeadModel""", """CTRLModel""", """CTRLPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowercase = [ """TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFCTRLForSequenceClassification""", """TFCTRLLMHeadModel""", """TFCTRLModel""", """TFCTRLPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig from .tokenization_ctrl import CTRLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) else: import sys __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]: '''simple docstring''' a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ ) a , a : int = [i[0] for i in r], [i[1] for i in r] a : Union[str, Any] = list(accumulate(A_ ) ) a : Optional[Any] = bisect(A_ , A_ ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations import unittest from transformers import MobileBertConfig, is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_MODEL_FOR_PRETRAINING_MAPPING, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertModel, ) @require_tf class __a (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase): '''simple docstring''' _SCREAMING_SNAKE_CASE :Any = ( ( TFMobileBertModel, TFMobileBertForMaskedLM, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertForMultipleChoice, ) if is_tf_available() else () ) _SCREAMING_SNAKE_CASE :List[Any] = ( { """feature-extraction""": TFMobileBertModel, """fill-mask""": TFMobileBertForMaskedLM, """question-answering""": TFMobileBertForQuestionAnswering, """text-classification""": TFMobileBertForSequenceClassification, """token-classification""": TFMobileBertForTokenClassification, """zero-shot""": TFMobileBertForSequenceClassification, } if is_tf_available() else {} ) _SCREAMING_SNAKE_CASE :str = False _SCREAMING_SNAKE_CASE :int = False def _a ( self , _a , _a , _a=False ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = super()._prepare_for_class(_a , _a , return_labels=_a ) if return_labels: if model_class in get_values(_a ): SCREAMING_SNAKE_CASE__ : List[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa ) return inputs_dict class __a (UpperCamelCase_): '''simple docstring''' def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=32 , _a=2 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = parent SCREAMING_SNAKE_CASE__ : Any = batch_size SCREAMING_SNAKE_CASE__ : int = seq_length SCREAMING_SNAKE_CASE__ : Tuple = is_training SCREAMING_SNAKE_CASE__ : Tuple = use_input_mask SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids SCREAMING_SNAKE_CASE__ : List[str] = use_labels SCREAMING_SNAKE_CASE__ : int = vocab_size SCREAMING_SNAKE_CASE__ : List[str] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : str = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[Any] = max_position_embeddings SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size SCREAMING_SNAKE_CASE__ : List[str] = type_sequence_label_size SCREAMING_SNAKE_CASE__ : Tuple = initializer_range SCREAMING_SNAKE_CASE__ : int = num_labels SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_choices SCREAMING_SNAKE_CASE__ : Tuple = scope SCREAMING_SNAKE_CASE__ : Union[str, Any] = embedding_size def _a ( self ) -> int: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ : Optional[int] = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = None SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : Tuple = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ : Any = MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFMobileBertModel(config=_a ) SCREAMING_SNAKE_CASE__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE__ : List[str] = model(_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = [input_ids, input_mask] SCREAMING_SNAKE_CASE__ : Any = model(_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = TFMobileBertForMaskedLM(config=_a ) SCREAMING_SNAKE_CASE__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = TFMobileBertForNextSentencePrediction(config=_a ) SCREAMING_SNAKE_CASE__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE__ : Tuple = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = TFMobileBertForPreTraining(config=_a ) SCREAMING_SNAKE_CASE__ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE__ : Dict = model(_a ) self.parent.assertEqual( result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = TFMobileBertForSequenceClassification(config=_a ) SCREAMING_SNAKE_CASE__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.num_choices SCREAMING_SNAKE_CASE__ : Optional[Any] = TFMobileBertForMultipleChoice(config=_a ) SCREAMING_SNAKE_CASE__ : Dict = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE__ : Optional[int] = tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) ) SCREAMING_SNAKE_CASE__ : Optional[int] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels SCREAMING_SNAKE_CASE__ : int = TFMobileBertForTokenClassification(config=_a ) SCREAMING_SNAKE_CASE__ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE__ : Tuple = model(_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = TFMobileBertForQuestionAnswering(config=_a ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} SCREAMING_SNAKE_CASE__ : Tuple = model(_a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _a ( self ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) : Any = config_and_inputs SCREAMING_SNAKE_CASE__ : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict def _a ( self ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = TFMobileBertModelTest.TFMobileBertModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=_a , hidden_size=37 ) def _a ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_a ) def _a ( self ) -> int: """simple docstring""" 
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_a ) def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_a ) def _a ( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_a ) def _a ( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_a ) def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_a ) def _a ( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_a ) def _a ( self ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_a ) @slow def _a ( self ) -> int: """simple docstring""" for model_name in ["google/mobilebert-uncased"]: SCREAMING_SNAKE_CASE__ : List[Any] = TFMobileBertModel.from_pretrained(_a ) self.assertIsNotNone(_a ) @require_tf class __a (unittest.TestCase): '''simple docstring''' @slow def _a ( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""" ) SCREAMING_SNAKE_CASE__ : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] ) SCREAMING_SNAKE_CASE__ : Tuple = model(_a )[0] SCREAMING_SNAKE_CASE__ : List[Any] = [1, 6, 30_522] self.assertEqual(output.shape , _a ) SCREAMING_SNAKE_CASE__ : int = tf.constant( [ [ [-4.5_919_547, -9.248_295, -9.645_256], [-6.7_306_175, -6.440_284, -6.6_052_837], [-7.2_743_506, -6.7_847_915, -6.024_673], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1E-4 )
"""simple docstring""" def _lowercase ( __lowerCAmelCase ) -> Dict: SCREAMING_SNAKE_CASE__ : Dict = [] SCREAMING_SNAKE_CASE__ : Optional[Any] = [] SCREAMING_SNAKE_CASE__ : int = { """^""": 3, """*""": 2, """/""": 2, """%""": 2, """+""": 1, """-""": 1, } # Priority of each operator SCREAMING_SNAKE_CASE__ : List[Any] = len(__lowerCAmelCase ) if (len(__lowerCAmelCase ) > 7) else 7 # Print table header for output print( """Symbol""".center(8 ) , """Stack""".center(__lowerCAmelCase ) , """Postfix""".center(__lowerCAmelCase ) , sep=""" | """ , ) print("""-""" * (print_width * 3 + 7) ) for x in infix: if x.isalpha() or x.isdigit(): post_fix.append(__lowerCAmelCase ) # if x is Alphabet / Digit, add it to Postfix elif x == "(": stack.append(__lowerCAmelCase ) # if x is "(" push to Stack elif x == ")": # if x is ")" pop stack until "(" is encountered while stack[-1] != "(": post_fix.append(stack.pop() ) # Pop stack & add the content to Postfix stack.pop() else: if len(__lowerCAmelCase ) == 0: stack.append(__lowerCAmelCase ) # If stack is empty, push x to stack else: # while priority of x is not > priority of element in the stack while len(__lowerCAmelCase ) > 0 and priority[x] <= priority[stack[-1]]: post_fix.append(stack.pop() ) # pop stack & add to Postfix stack.append(__lowerCAmelCase ) # push x to stack print( x.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format while len(__lowerCAmelCase ) > 0: # while stack is not empty post_fix.append(stack.pop() ) # pop stack & add to Postfix print( """ """.center(8 ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , ("""""".join(__lowerCAmelCase )).ljust(__lowerCAmelCase ) , sep=""" | """ , ) # Output in tabular format return "".join(__lowerCAmelCase ) # return Postfix as str def _lowercase ( __lowerCAmelCase ) -> str: SCREAMING_SNAKE_CASE__ : List[str] = list(infix[::-1] ) # reverse the infix equation for i in range(len(__lowerCAmelCase ) ): if infix[i] == "(": SCREAMING_SNAKE_CASE__ : Optional[int] = """)""" # change "(" to ")" elif infix[i] == ")": SCREAMING_SNAKE_CASE__ : Optional[Any] = """(""" # change ")" to "(" return (infix_2_postfix("""""".join(__lowerCAmelCase ) ))[ ::-1 ] # call infix_2_postfix on Infix, return reverse of Postfix if __name__ == "__main__": a :Optional[int] = input("\nEnter an Infix Equation = ") # Input an Infix equation a :Dict = "".join(Infix.split()) # Remove spaces from the input print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
"""simple docstring""" import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class snake_case_: def lowerCamelCase__ ( self : Optional[Any] ): torch.manual_seed(0 ) lowerCAmelCase : Optional[Any] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowerCAmelCase : str = UNetaDConditionModel( sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase : int = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) lowerCAmelCase : Tuple = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def lowerCamelCase__ ( self : int ): torch.manual_seed(0 ) lowerCAmelCase : str = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowerCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) lowerCAmelCase : Any = UNetaDConditionModel( sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.414 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) lowerCAmelCase : Optional[Any] = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , thresholding=UpperCamelCase_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) lowerCAmelCase : Tuple = DDPMScheduler( num_train_timesteps=1_0_0_0 , 
beta_schedule='''squaredcos_cap_v2''' , beta_start=0.0_001 , beta_end=0.02 , ) torch.manual_seed(0 ) lowerCAmelCase : List[str] = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def lowerCamelCase__ ( self : Tuple ): lowerCAmelCase : Union[str, Any] = self.get_dummy_components() lowerCAmelCase : str = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Tuple = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : Any = inputs['''prompt'''] lowerCAmelCase : Optional[Any] = inputs['''generator'''] lowerCAmelCase : Tuple = inputs['''num_inference_steps'''] lowerCAmelCase : Any = inputs['''output_type'''] if "image" in inputs: lowerCAmelCase : str = inputs['''image'''] else: lowerCAmelCase : List[str] = None if "mask_image" in inputs: lowerCAmelCase : Optional[Any] = inputs['''mask_image'''] else: lowerCAmelCase : List[str] = None if "original_image" in inputs: lowerCAmelCase : Tuple = inputs['''original_image'''] else: lowerCAmelCase : Optional[int] = None lowerCAmelCase, lowerCAmelCase : Optional[Any] = pipe.encode_prompt(UpperCamelCase_ ) # inputs with prompt converted to embeddings lowerCAmelCase : Dict = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: lowerCAmelCase : Optional[Any] = image if mask_image is not None: lowerCAmelCase : Tuple = mask_image if original_image is not None: lowerCAmelCase : Optional[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) lowerCAmelCase : Dict = pipe(**UpperCamelCase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = self.pipeline_class.from_pretrained(UpperCamelCase_ ) pipe_loaded.to(UpperCamelCase_ ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(UpperCamelCase_ , UpperCamelCase_ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , ) lowerCAmelCase : int = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : List[str] = inputs['''generator'''] lowerCAmelCase : Dict = inputs['''num_inference_steps'''] lowerCAmelCase : Optional[int] = inputs['''output_type'''] # inputs with prompt converted to embeddings lowerCAmelCase : Union[str, Any] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: lowerCAmelCase : Any = image if mask_image is not None: lowerCAmelCase : Optional[int] = mask_image if original_image is not None: lowerCAmelCase : Any = original_image lowerCAmelCase : List[str] = pipe_loaded(**UpperCamelCase_ )[0] lowerCAmelCase : str = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max() self.assertLess(UpperCamelCase_ , 1E-4 ) def lowerCamelCase__ ( self : str ): lowerCAmelCase : str = 
self.get_dummy_components() lowerCAmelCase : int = self.pipeline_class(**UpperCamelCase_ ) pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) lowerCAmelCase : Optional[int] = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : List[Any] = pipe(**UpperCamelCase_ )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(UpperCamelCase_ ) lowerCAmelCase : Union[str, Any] = self.pipeline_class.from_pretrained(UpperCamelCase_ ) pipe_loaded.to(UpperCamelCase_ ) pipe_loaded.set_progress_bar_config(disable=UpperCamelCase_ ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ ) lowerCAmelCase : int = pipe_loaded(**UpperCamelCase_ )[0] lowerCAmelCase : Union[str, Any] = np.abs(to_np(UpperCamelCase_ ) - to_np(UpperCamelCase_ ) ).max() self.assertLess(UpperCamelCase_ , 1E-4 )
"""simple docstring""" import math def _snake_case ( ): lowerCAmelCase : Union[str, Any] = input('''Enter message: ''' ) lowerCAmelCase : Optional[int] = int(input(f'''Enter key [2-{len(_snake_case ) - 1}]: ''' ) ) lowerCAmelCase : str = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): lowerCAmelCase : Any = encrypt_message(_snake_case , _snake_case ) elif mode.lower().startswith('''d''' ): lowerCAmelCase : Union[str, Any] = decrypt_message(_snake_case , _snake_case ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(f'''Output:\n{text + "|"}''' ) def _snake_case ( _snake_case : int , _snake_case : str ): lowerCAmelCase : Optional[Any] = [''''''] * key for col in range(_snake_case ): lowerCAmelCase : Optional[Any] = col while pointer < len(_snake_case ): cipher_text[col] += message[pointer] pointer += key return "".join(_snake_case ) def _snake_case ( _snake_case : int , _snake_case : str ): lowerCAmelCase : Union[str, Any] = math.ceil(len(_snake_case ) / key ) lowerCAmelCase : str = key lowerCAmelCase : Any = (num_cols * num_rows) - len(_snake_case ) lowerCAmelCase : Dict = [''''''] * num_cols lowerCAmelCase : int = 0 lowerCAmelCase : int = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): lowerCAmelCase : int = 0 row += 1 return "".join(_snake_case ) if __name__ == "__main__": import doctest doctest.testmod() main()
60
1
'''simple docstring''' import argparse import hashlib # hashlib is only used inside the Test class import struct class _lowerCAmelCase : '''simple docstring''' def __init__(self , UpperCAmelCase ) -> Optional[int]: _snake_case = data _snake_case = [0x67_45_23_01, 0xEF_CD_AB_89, 0x98_BA_DC_FE, 0x10_32_54_76, 0xC3_D2_E1_F0] @staticmethod def lowercase (UpperCAmelCase , UpperCAmelCase ) -> str: return ((n << b) | (n >> (32 - b))) & 0xFF_FF_FF_FF def lowercase (self ) -> Dict: _snake_case = B"""\x80""" + B"""\x00""" * (63 - (len(self.data ) + 8) % 64) _snake_case = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) ) return padded_data def lowercase (self ) -> Optional[int]: return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def lowercase (self , UpperCAmelCase ) -> Optional[int]: _snake_case = list(struct.unpack(""">16L""" , UpperCAmelCase ) ) + [0] * 64 for i in range(16 , 80 ): _snake_case = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def lowercase (self ) -> Any: _snake_case = self.padding() _snake_case = self.split_blocks() for block in self.blocks: _snake_case = self.expand_block(UpperCAmelCase ) _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = self.h for i in range(0 , 80 ): if 0 <= i < 20: _snake_case = (b & c) | ((~b) & d) _snake_case = 0x5A_82_79_99 elif 20 <= i < 40: _snake_case = b ^ c ^ d _snake_case = 0x6E_D9_EB_A1 elif 40 <= i < 60: _snake_case = (b & c) | (b & d) | (c & d) _snake_case = 0x8F_1B_BC_DC elif 60 <= i < 80: _snake_case = b ^ c ^ d _snake_case = 0xCA_62_C1_D6 _snake_case, _snake_case, _snake_case, _snake_case, _snake_case = ( self.rotate(UpperCAmelCase , 5 ) + f + e + k + expanded_block[i] & 0xFF_FF_FF_FF, a, self.rotate(UpperCAmelCase , 30 ), c, d, ) _snake_case = ( self.h[0] + a & 0xFF_FF_FF_FF, self.h[1] + b & 0xFF_FF_FF_FF, self.h[2] + c & 0xFF_FF_FF_FF, self.h[3] + d & 0xFF_FF_FF_FF, self.h[4] + e & 0xFF_FF_FF_FF, ) return ("{:08x}" * 5).format(*self.h ) def __SCREAMING_SNAKE_CASE ( ): _snake_case = b"""Test String""" assert SHAaHash(_SCREAMING_SNAKE_CASE ).final_hash() == hashlib.shaa(_SCREAMING_SNAKE_CASE ).hexdigest() # noqa: S324 def __SCREAMING_SNAKE_CASE ( ): _snake_case = argparse.ArgumentParser(description="""Process some strings or files""" ) parser.add_argument( """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , ) parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""" ) _snake_case = parser.parse_args() _snake_case = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , """rb""" ) as f: _snake_case = f.read() else: _snake_case = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" ) print(SHAaHash(_SCREAMING_SNAKE_CASE ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
270
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase = { 'configuration_efficientformer': [ 'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientFormerConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = ['EfficientFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ 'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientFormerForImageClassification', 'EfficientFormerForImageClassificationWithTeacher', 'EfficientFormerModel', 'EfficientFormerPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase = [ 'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFEfficientFormerForImageClassification', 'TFEfficientFormerForImageClassificationWithTeacher', 'TFEfficientFormerModel', 'TFEfficientFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientformer import EfficientFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientformer import ( EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientFormerForImageClassification, EfficientFormerForImageClassificationWithTeacher, EfficientFormerModel, EfficientFormerPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, TFEfficientFormerPreTrainedModel, ) else: import sys __lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
270
1
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : str ): '''simple docstring''' if not (isinstance(__snake_case , __snake_case ) and isinstance(__snake_case , __snake_case )): raise ValueError('longest_common_substring() takes two strings for inputs' ) lowercase = len(__snake_case ) lowercase = len(__snake_case ) lowercase = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] lowercase = 0 lowercase = 0 for i in range(1 , texta_length + 1 ): for j in range(1 , texta_length + 1 ): if texta[i - 1] == texta[j - 1]: lowercase = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: lowercase = i lowercase = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
220
"""simple docstring""" import logging import os import sys from pathlib import Path from unittest.mock import patch from parameterized import parameterized from run_eval import run_generate from run_eval_search import run_search from transformers.testing_utils import CaptureStdout, TestCasePlus, slow from utils import ROUGE_KEYS logging.basicConfig(level=logging.DEBUG) _UpperCamelCase : Tuple = logging.getLogger() def _SCREAMING_SNAKE_CASE ( __snake_case : Path , __snake_case : list ): '''simple docstring''' lowercase = '\n'.join(__snake_case ) Path(__snake_case ).open('w' ).writelines(__snake_case ) _UpperCamelCase : Union[str, Any] = 'patrickvonplaten/t5-tiny-random' _UpperCamelCase : Union[str, Any] = 'sshleifer/bart-tiny-random' _UpperCamelCase : Tuple = 'sshleifer/tiny-mbart' _UpperCamelCase : Union[str, Any] = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks class a ( a_ ): def UpperCamelCase_ ( self , _lowerCamelCase ): lowercase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' lowercase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() lowercase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.'] _dump_articles(_lowerCamelCase , _lowerCamelCase ) lowercase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' ) lowercase = 'translation_en_to_de' if model == T5_TINY else 'summarization' lowercase = F'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split() with patch.object(_lowerCamelCase , 'argv' , _lowerCamelCase ): run_generate() assert Path(_lowerCamelCase ).exists() # os.remove(Path(output_file_name)) def UpperCamelCase_ ( self ): self.run_eval_tester(_lowerCamelCase ) @parameterized.expand([BART_TINY, MBART_TINY] ) @slow def UpperCamelCase_ ( self , _lowerCamelCase ): self.run_eval_tester(_lowerCamelCase ) @parameterized.expand([T5_TINY, MBART_TINY] ) @slow def UpperCamelCase_ ( self , _lowerCamelCase ): lowercase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source' lowercase = input_file_name.parent / 'utest_output.txt' assert not output_file_name.exists() lowercase = { 'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'], 'de': [ 'Maschinelles Lernen ist großartig, oder?', 'Ich esse gerne Bananen', 'Morgen ist wieder ein toller Tag!', ], } lowercase = Path(self.get_auto_remove_tmp_dir() ) lowercase = str(tmp_dir / 'scores.json' ) lowercase = str(tmp_dir / 'val.target' ) _dump_articles(_lowerCamelCase , text['en'] ) _dump_articles(_lowerCamelCase , text['de'] ) lowercase = 'translation_en_to_de' if model == T5_TINY else 'summarization' lowercase = F'\n run_eval_search.py\n {model}\n {str(_lowerCamelCase )}\n {str(_lowerCamelCase )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split() testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] ) with patch.object(_lowerCamelCase , 'argv' , _lowerCamelCase ): with CaptureStdout() as cs: run_search() lowercase = [' num_beams | length_penalty', model, 'Best score args'] lowercase = ['Info'] if "translation" in task: expected_strings.append('bleu' ) else: expected_strings.extend(_lowerCamelCase ) for w in expected_strings: assert w in cs.out for w in un_expected_strings: assert w not in cs.out assert 
Path(_lowerCamelCase ).exists() os.remove(Path(_lowerCamelCase ) )
220
1
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() _lowerCamelCase : Optional[Any] = logging.get_logger() @dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = 42 UpperCAmelCase__ = field(default_factory=UpperCAmelCase__ ) UpperCAmelCase__ = field(default_factory=UpperCAmelCase__ ) def SCREAMING_SNAKE_CASE ( self : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Tensor , UpperCAmelCase__ : Tensor) ->str: '''simple docstring''' A__ = len(list(m.modules())) == 1 or isinstance(UpperCAmelCase__ , nn.Convad) or isinstance(UpperCAmelCase__ , nn.BatchNormad) if has_not_submodules: self.traced.append(UpperCAmelCase__) def __call__( self : int , UpperCAmelCase__ : Tensor) ->List[Any]: '''simple docstring''' for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(UpperCAmelCase__) [x.remove() for x in self.handles] return self @property def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' return list(filter(lambda UpperCAmelCase__: len(list(x.state_dict().keys())) > 0 , self.traced)) @dataclass class UpperCamelCase_ : '''simple docstring''' UpperCAmelCase__ = 42 UpperCAmelCase__ = 42 UpperCAmelCase__ = 1 UpperCAmelCase__ = field(default_factory=UpperCAmelCase__ ) UpperCAmelCase__ = field(default_factory=UpperCAmelCase__ ) UpperCAmelCase__ = True def __call__( self : Optional[Any] , UpperCAmelCase__ : Tensor) ->Union[str, Any]: '''simple docstring''' A__ = Tracker(self.dest)(UpperCAmelCase__).parametrized A__ = Tracker(self.src)(UpperCAmelCase__).parametrized A__ = list(filter(lambda UpperCAmelCase__: type(UpperCAmelCase__) not in self.src_skip , UpperCAmelCase__)) A__ = list(filter(lambda UpperCAmelCase__: type(UpperCAmelCase__) not in self.dest_skip , UpperCAmelCase__)) if len(UpperCAmelCase__) != len(UpperCAmelCase__) and self.raise_if_mismatch: raise Exception( f"""Numbers of operations are different. 
Source module has {len(UpperCAmelCase__)} operations while""" f""" destination module has {len(UpperCAmelCase__)}.""") for dest_m, src_m in zip(UpperCAmelCase__ , UpperCAmelCase__): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f"""Transferred from={src_m} to={dest_m}""") class UpperCamelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCAmelCase__ : nn.Module) ->Optional[int]: '''simple docstring''' super().__init__() A__ = [] # - get the stem feature_blocks.append(('''conv1''', model.stem)) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('''block'''), f"""Unexpected layer name {k}""" A__ = len(UpperCAmelCase__) + 1 feature_blocks.append((f"""res{block_index}""", v)) A__ = nn.ModuleDict(UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : Tensor) ->List[Any]: '''simple docstring''' return get_trunk_forward_outputs( UpperCAmelCase__ , out_feat_keys=UpperCAmelCase__ , feature_blocks=self._feature_blocks , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : str) ->str: '''simple docstring''' A__ = x.split('''-''') return x_split[0] + x_split[1] + "_" + "".join(x_split[2:]) def __getitem__( self : Optional[Any] , UpperCAmelCase__ : str) ->Callable[[], Tuple[nn.Module, Dict]]: '''simple docstring''' if x not in self: A__ = self.convert_name_to_timm(UpperCAmelCase__) A__ = partial(lambda: (timm.create_model(UpperCAmelCase__ , pretrained=UpperCAmelCase__).eval(), None)) else: A__ = super().__getitem__(UpperCAmelCase__) return val class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' def __getitem__( self : Tuple , UpperCAmelCase__ : str) ->Callable[[], nn.Module]: '''simple docstring''' if "seer" in x and "in1k" not in x: A__ = RegNetModel else: A__ = RegNetForImageClassification return val def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]: """simple docstring""" for from_key, to_key in keys: A__ = from_state_dict[from_key].clone() print(f"""Copied key={from_key} to={to_key}""" ) return to_state_dict def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = True , ) -> Union[str, Any]: """simple docstring""" print(f"""Converting {name}...""" ) with torch.no_grad(): A__ , A__ = from_model_func() A__ = our_model_func(lowercase_ ).eval() A__ = ModuleTransfer(src=lowercase_ , dest=lowercase_ , raise_if_mismatch=lowercase_ ) A__ = torch.randn((1, 3, 224, 224) ) module_transfer(lowercase_ ) if from_state_dict is not None: A__ = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: A__ = [('''0.clf.0.weight''', '''classifier.1.weight'''), ('''0.clf.0.bias''', '''classifier.1.bias''')] A__ = manually_copy_vissl_head(lowercase_ , our_model.state_dict() , lowercase_ ) our_model.load_state_dict(lowercase_ ) A__ = our_model(lowercase_ , output_hidden_states=lowercase_ ) A__ = ( our_outputs.logits if isinstance(lowercase_ , lowercase_ ) else our_outputs.last_hidden_state ) A__ = from_model(lowercase_ ) A__ = from_output[-1] if type(lowercase_ ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have a head, so let's just check the last hidden state if "seer" in name and "in1k" in name: A__ = our_outputs.hidden_states[-1] assert torch.allclose(lowercase_ , lowercase_ ), "The model logits 
don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=lowercase_ , ) A__ = 224 if '''seer''' not in name else 384 # we can use the convnext one A__ = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=lowercase_ ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=lowercase_ , ) print(f"""Pushed {name}""" ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None , lowercase_ = True ) -> Any: """simple docstring""" A__ = '''imagenet-1k-id2label.json''' A__ = 1_000 A__ = (1, num_labels) A__ = '''huggingface/label-files''' A__ = num_labels A__ = json.load(open(cached_download(hf_hub_url(lowercase_ , lowercase_ , repo_type='''dataset''' ) ) , '''r''' ) ) A__ = {int(lowercase_ ): v for k, v in idalabel.items()} A__ = idalabel A__ = {v: k for k, v in idalabel.items()} A__ = partial(lowercase_ , num_labels=lowercase_ , idalabel=lowercase_ , labelaid=lowercase_ ) A__ = { '''regnet-x-002''': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ), '''regnet-x-004''': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ), '''regnet-x-006''': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ), '''regnet-x-008''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ), '''regnet-x-016''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ), '''regnet-x-032''': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ), '''regnet-x-040''': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ), '''regnet-x-064''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ), '''regnet-x-080''': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ), '''regnet-x-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ), '''regnet-x-160''': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ), '''regnet-x-320''': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ), # y variant '''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), '''regnet-y-004''': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), '''regnet-y-006''': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), '''regnet-y-008''': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), '''regnet-y-016''': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), '''regnet-y-032''': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 
1_512] , groups_width=24 ), '''regnet-y-040''': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ), '''regnet-y-064''': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ), '''regnet-y-080''': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ), '''regnet-y-120''': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ), '''regnet-y-160''': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ), '''regnet-y-320''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 '''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), '''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ), '''regnet-y-1280-seer''': RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ), '''regnet-y-2560-seer''': RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ), '''regnet-y-10b-seer''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ), # finetuned on imagenet '''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), '''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ), '''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ), '''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ), '''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ), } A__ = NameToOurModelFuncMap() A__ = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(lowercase_ , lowercase_ ) -> Tuple[nn.Module, Dict]: A__ = torch.hub.load_state_dict_from_url(lowercase_ , model_dir=str(lowercase_ ) , map_location='''cpu''' ) A__ = model_func() # check if we have a head, if yes add it A__ = files['''classy_state_dict''']['''base_model''']['''model'''] A__ = model_state_dict['''trunk'''] model.load_state_dict(lowercase_ ) return model.eval(), model_state_dict["heads"] # pretrained A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: 
FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) A__ = partial( lowercase_ , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1_010 , w_0=1_744 , w_a=6_20.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , lowercase_ , lowercase_ , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( lowercase_ , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , lowercase_ , lowercase_ , lowercase_ , ) return config, expected_shape if __name__ == "__main__": _lowerCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported regnet* architecture,""" """ currently: regnetx-*, regnety-*. If `None`, all of them will be converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _lowerCamelCase : List[str] = parser.parse_args() _lowerCamelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
231
_lowerCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 3.6, "mph": 1.609_344, "knot": 1.852, } _lowerCamelCase : dict[str, float] = { "km/h": 1.0, "m/s": 0.277_777_778, "mph": 0.621_371_192, "knot": 0.539_956_803, } def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> float: """simple docstring""" if unit_to not in speed_chart or unit_from not in speed_chart_inverse: A__ = ( f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n""" f"""Valid values are: {", ".join(lowercase_ )}""" ) raise ValueError(lowercase_ ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
231
1
from collections.abc import Iterable from typing import Generic, TypeVar _snake_case : Optional[Any] = TypeVar('_T') class _UpperCAmelCase ( Generic[_T] ): """simple docstring""" def __init__( self : int , lowerCAmelCase_ : Iterable[_T] | None = None ) -> None: __lowerCAmelCase = list(iterable or [] ) __lowerCAmelCase = [] def __len__( self : Optional[int] ) -> int: return len(self._stacka ) + len(self._stacka ) def __repr__( self : List[Any] ) -> str: return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def lowercase ( self : Tuple , lowerCAmelCase_ : _T ) -> None: self._stacka.append(lowerCAmelCase_ ) def lowercase ( self : Union[str, Any] ) -> _T: __lowerCAmelCase = self._stacka.pop __lowerCAmelCase = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError('Queue is empty' ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
284
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _snake_case : Dict = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case : Optional[int] = [ 'MRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'MraForMaskedLM', 'MraForMultipleChoice', 'MraForQuestionAnswering', 'MraForSequenceClassification', 'MraForTokenClassification', 'MraLayer', 'MraModel', 'MraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys _snake_case : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure)
284
1
'''simple docstring''' def lowerCAmelCase_ ( ): '''simple docstring''' A : int = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] A : int = 6 A : List[str] = 1 A : int = 1901 A : List[Any] = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 A : List[str] = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 A : Union[str, Any] = day - 29 else: if day > days_per_month[month - 1]: month += 1 A : Dict = day - days_per_month[month - 2] if month > 12: year += 1 A : Tuple = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
356
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" A : List[Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , ) A : Optional[Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
311
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/vivit-b-16x2-kinetics400''': ( '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json''' ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = '''vivit''' def __init__( self : str ,A_ : List[str]=224 ,A_ : Union[str, Any]=32 ,A_ : List[str]=[2, 16, 16] ,A_ : Any=3 ,A_ : int=768 ,A_ : Optional[int]=12 ,A_ : int=12 ,A_ : Any=3072 ,A_ : Union[str, Any]="gelu_fast" ,A_ : Any=0.0 ,A_ : Dict=0.0 ,A_ : Optional[int]=0.02 ,A_ : Union[str, Any]=1e-06 ,A_ : Union[str, Any]=True ,**A_ : List[Any] ,) -> Dict: A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = layer_norm_eps A = image_size A = num_frames A = tubelet_size A = num_channels A = qkv_bias super().__init__(**A_ )
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
8
0
def A (__A : float , __A : float , __A : float , __A : float , __A : float , ) -> float: """simple docstring""" UpperCAmelCase_ = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError('''All input parameters must be positive''' ) if any(p > 1 for p in parameters[1:4] ): raise ValueError('''Relative densities cannot be greater than one''' ) else: UpperCAmelCase_ = 1 - (matter_density + radiation_density + dark_energy) UpperCAmelCase_ = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) UpperCAmelCase_ = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation snake_case_ : Any = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
356
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): snake_case_ : Any = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right snake_case_ : Optional[Any] = 128022 snake_case_ : Optional[int] = 128028 @require_sentencepiece class __snake_case ( a , unittest.TestCase ): UpperCAmelCase__ : List[str] = MaMaaaTokenizer UpperCAmelCase__ : int = False UpperCAmelCase__ : Dict = False UpperCAmelCase__ : List[str] = True def lowerCamelCase ( self : str): """simple docstring""" super().setUp() UpperCAmelCase_ = ['''</s>''', '''<unk>''', '''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''', '''\u0120''', '''<pad>'''] UpperCAmelCase_ = dict(zip(_snake_case , range(len(_snake_case)))) UpperCAmelCase_ = Path(self.tmpdirname) save_json(_snake_case , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_snake_case , save_dir / VOCAB_FILES_NAMES['''spm_file''']) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def lowerCamelCase ( self : str , **_snake_case : Union[str, Any]): """simple docstring""" return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_snake_case) def lowerCamelCase ( self : Optional[int] , _snake_case : List[str]): """simple docstring""" return ( "This is a test", "This is a test", ) def lowerCamelCase ( self : Optional[int]): """simple docstring""" UpperCAmelCase_ = '''</s>''' UpperCAmelCase_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_snake_case) , _snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_snake_case) , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = list(tokenizer.get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''</s>''') self.assertEqual(vocab_keys[1] , '''<unk>''') self.assertEqual(vocab_keys[-1] , '''<s>''') self.assertEqual(len(_snake_case) , tokenizer.vocab_size + len(tokenizer.get_added_vocab())) @unittest.skip('''Skip this test while all models are still to be uploaded.''') def lowerCamelCase ( self : Optional[int]): """simple docstring""" pass def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.get_tokenizer() UpperCAmelCase_ = tokenizer.tokenize('''This is a test''') self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(_snake_case) , [2, 3, 4, 5, 6] , ) UpperCAmelCase_ = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6]) self.assertListEqual(_snake_case , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) UpperCAmelCase_ = tokenizer.convert_tokens_to_string(_snake_case) self.assertEqual(_snake_case , '''This is a test''') @slow def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 
207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , ) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): UpperCAmelCase__ : Dict = '''facebook/m2m100_418M''' UpperCAmelCase__ : Dict = [ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] UpperCAmelCase__ : Dict = [ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off UpperCAmelCase__ : Any = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def lowerCamelCase ( cls : Optional[Any]): """simple docstring""" UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang='''en''' , tgt_lang='''fr''') UpperCAmelCase_ = 1 return cls def lowerCamelCase ( self : List[Any]): 
"""simple docstring""" self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006) self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022) self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076) self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063) def lowerCamelCase ( self : Union[str, Any]): """simple docstring""" UpperCAmelCase_ = self.tokenizer.get_vocab() self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size) self.assertEqual(vocab['''<unk>'''] , 3) self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case) def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , _snake_case) def lowerCamelCase ( self : Any): """simple docstring""" self.assertIn(_snake_case , self.tokenizer.all_special_ids) # fmt: off UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2] # fmt: on UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case) UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case) self.assertEqual(_snake_case , _snake_case) self.assertNotIn(self.tokenizer.eos_token , _snake_case) def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = tempfile.mkdtemp() UpperCAmelCase_ = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_snake_case) UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case) self.assertDictEqual(new_tok.lang_token_to_id , _snake_case) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = '''en''' UpperCAmelCase_ = '''fr''' UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''') UpperCAmelCase_ = shift_tokens_right( batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id) for k in batch: UpperCAmelCase_ = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def lowerCamelCase ( self : str): """simple docstring""" UpperCAmelCase_ = '''mr''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) UpperCAmelCase_ = '''zh''' self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) @require_torch def lowerCamelCase ( self : Dict): """simple docstring""" UpperCAmelCase_ = '''mr''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')]) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) UpperCAmelCase_ = '''zh''' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')]) 
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id]) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)]) @require_torch def lowerCamelCase ( self : Tuple): """simple docstring""" UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''') self.assertEqual( nested_simplify(_snake_case) , { # en_XX, A, test, EOS '''input_ids''': [[128022, 58, 4183, 2]], '''attention_mask''': [[1, 1, 1, 1]], # ar_AR '''forced_bos_token_id''': 128006, } , )
7
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE_:str = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE_:Union[str, Any] = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE_:Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
116
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) a__ : int = { 'configuration_layoutlmv3': [ 'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv3Config', 'LayoutLMv3OnnxConfig', ], 'processing_layoutlmv3': ['LayoutLMv3Processor'], 'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = ['LayoutLMv3TokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ 'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'LayoutLMv3ForQuestionAnswering', 'LayoutLMv3ForSequenceClassification', 'LayoutLMv3ForTokenClassification', 'LayoutLMv3Model', 'LayoutLMv3PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : str = [ 'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFLayoutLMv3ForQuestionAnswering', 'TFLayoutLMv3ForSequenceClassification', 'TFLayoutLMv3ForTokenClassification', 'TFLayoutLMv3Model', 'TFLayoutLMv3PreTrainedModel', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[Any] = ['LayoutLMv3FeatureExtractor'] a__ : str = ['LayoutLMv3ImageProcessor'] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys a__ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
349
0
'''simple docstring''' def _snake_case ( _SCREAMING_SNAKE_CASE : list ) -> list: """simple docstring""" if len(_SCREAMING_SNAKE_CASE ) < 2: return collection def circle_sort_util(_SCREAMING_SNAKE_CASE : list , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> bool: lowerCAmelCase = False if low == high: return swapped lowerCAmelCase = low lowerCAmelCase = high while left < right: if collection[left] > collection[right]: lowerCAmelCase, lowerCAmelCase = ( collection[right], collection[left], ) lowerCAmelCase = True left += 1 right -= 1 if left == right and collection[left] > collection[right + 1]: lowerCAmelCase, lowerCAmelCase = ( collection[right + 1], collection[left], ) lowerCAmelCase = True lowerCAmelCase = low + int((high - low) / 2 ) lowerCAmelCase = circle_sort_util(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowerCAmelCase = circle_sort_util(_SCREAMING_SNAKE_CASE , mid + 1 , _SCREAMING_SNAKE_CASE ) return swapped or left_swap or right_swap lowerCAmelCase = True while is_not_sorted is True: lowerCAmelCase = circle_sort_util(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 ) return collection if __name__ == "__main__": UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase = [int(item) for item in user_input.split(',')] print(circle_sort(unsorted))
187
'''simple docstring''' import tensorflow as tf from ...tf_utils import shape_list class __snake_case( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , A_ , A_ , A_ , A_ , A_=1 , A_=False , **A_ ) -> Optional[int]: super().__init__(**A_ ) lowerCAmelCase = vocab_size lowerCAmelCase = d_embed lowerCAmelCase = d_proj lowerCAmelCase = cutoffs + [vocab_size] lowerCAmelCase = [0] + self.cutoffs lowerCAmelCase = div_val lowerCAmelCase = self.cutoffs[0] lowerCAmelCase = len(self.cutoffs ) - 1 lowerCAmelCase = self.shortlist_size + self.n_clusters lowerCAmelCase = keep_order lowerCAmelCase = [] lowerCAmelCase = [] def __snake_case ( self , A_ ) -> int: if self.n_clusters > 0: lowerCAmelCase = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer="""zeros""" , trainable=A_ , name="""cluster_weight""" ) lowerCAmelCase = self.add_weight( shape=(self.n_clusters,) , initializer="""zeros""" , trainable=A_ , name="""cluster_bias""" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: lowerCAmelCase = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer="""zeros""" , trainable=A_ , name=f'out_projs_._{i}' , ) self.out_projs.append(A_ ) else: self.out_projs.append(A_ ) lowerCAmelCase = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._weight' , ) lowerCAmelCase = self.add_weight( shape=(self.vocab_size,) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): lowerCAmelCase, lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowerCAmelCase = self.d_embed // (self.div_val**i) lowerCAmelCase = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer="""zeros""" , trainable=A_ , name=f'out_projs_._{i}' ) self.out_projs.append(A_ ) lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._weight' , ) lowerCAmelCase = self.add_weight( shape=(r_idx - l_idx,) , initializer="""zeros""" , trainable=A_ , name=f'out_layers_._{i}_._bias' , ) self.out_layers.append((weight, bias) ) super().build(A_ ) @staticmethod def __snake_case ( A_ , A_ , A_ , A_=None ) -> List[Any]: lowerCAmelCase = x if proj is not None: lowerCAmelCase = tf.einsum("""ibd,ed->ibe""" , A_ , A_ ) return tf.einsum("""ibd,nd->ibn""" , A_ , A_ ) + b @staticmethod def __snake_case ( A_ , A_ ) -> Dict: lowerCAmelCase = shape_list(A_ ) lowerCAmelCase = tf.range(lp_size[0] , dtype=target.dtype ) lowerCAmelCase = tf.stack([r, target] , 1 ) return tf.gather_nd(A_ , A_ ) def __snake_case ( self , A_ , A_ , A_=True , A_=False ) -> Tuple: lowerCAmelCase = 0 if self.n_clusters == 0: lowerCAmelCase = self._logit(A_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: lowerCAmelCase = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=A_ , logits=A_ ) lowerCAmelCase = tf.nn.log_softmax(A_ , axis=-1 ) else: lowerCAmelCase = shape_list(A_ ) lowerCAmelCase = [] lowerCAmelCase = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): lowerCAmelCase, lowerCAmelCase = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: lowerCAmelCase = (target >= l_idx) & (target < r_idx) lowerCAmelCase = tf.where(A_ ) lowerCAmelCase = tf.boolean_mask(A_ , A_ ) - l_idx if self.div_val == 1: lowerCAmelCase = self.out_layers[0][0][l_idx:r_idx] lowerCAmelCase = 
self.out_layers[0][1][l_idx:r_idx] else: lowerCAmelCase = self.out_layers[i][0] lowerCAmelCase = self.out_layers[i][1] if i == 0: lowerCAmelCase = tf.concat([cur_W, self.cluster_weight] , 0 ) lowerCAmelCase = tf.concat([cur_b, self.cluster_bias] , 0 ) lowerCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[0] ) lowerCAmelCase = tf.nn.log_softmax(A_ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: lowerCAmelCase = tf.boolean_mask(A_ , A_ ) lowerCAmelCase = self._gather_logprob(A_ , A_ ) else: lowerCAmelCase = self._logit(A_ , A_ , A_ , self.out_projs[i] ) lowerCAmelCase = tf.nn.log_softmax(A_ ) lowerCAmelCase = self.cutoffs[0] + i - 1 # No probability for the head cluster lowerCAmelCase = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(A_ ) if target is not None: lowerCAmelCase = tf.boolean_mask(A_ , A_ ) lowerCAmelCase = tf.boolean_mask(A_ , A_ ) lowerCAmelCase = self._gather_logprob(A_ , A_ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(A_ , -cur_logprob , shape_list(A_ ) ) lowerCAmelCase = tf.concat(A_ , axis=-1 ) if target is not None: if return_mean: lowerCAmelCase = tf.reduce_mean(A_ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(A_ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(A_ , name=self.name , aggregation="""mean""" if return_mean else """""" ) return out
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert a roman numeral string to an integer."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral string."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
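# Round-trip example (a quick sanity check; not part of the original file):
#   roman_to_int("MMMCMXCIX")  -> 3999
#   int_to_roman(3999)         -> "MMMCMXCIX"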
"""simple docstring""" from collections import defaultdict from math import gcd def lowercase ( _snake_case : Union[str, Any] = 1_500_000 ) ->Optional[Any]: """simple docstring""" __snake_case : defaultdict = defaultdict(_snake_case ) __snake_case : str = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , _snake_case , 2 ): if gcd(_snake_case , _snake_case ) > 1: continue __snake_case : Optional[Any] = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(_snake_case , limit + 1 , _snake_case ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F'{solution() = }')
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def lowercase ( ) ->Optional[int]: """simple docstring""" __snake_case : int = torch.nn.Linear(2 , 4 ) __snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 ) __snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) __snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) __snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def lowercase ( _snake_case : str ) ->Optional[Any]: """simple docstring""" return (model.weight.abs().sum() + model.bias.abs().sum()).item() def lowercase ( _snake_case : Union[str, Any] ) ->Tuple: """simple docstring""" __snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(_snake_case ) class _UpperCAmelCase ( __snake_case ): '''simple docstring''' @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(a_ ): __snake_case : Any = Accelerator(cpu=a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case : Optional[int] = GradientState() assert state.num_steps == 1 __snake_case : str = 4 assert state.num_steps == 4 assert state.sync_gradients is True __snake_case : List[Any] = False assert state.sync_gradients is False GradientState._reset_state() def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) self.assertTrue(prepared_model in accelerator._models ) self.assertTrue(prepared_optimizer in accelerator._optimizers ) self.assertTrue(prepared_scheduler in accelerator._schedulers ) self.assertTrue(prepared_train_dl in accelerator._dataloaders ) self.assertTrue(prepared_valid_dl in accelerator._dataloaders ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Tuple = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) accelerator.free_memory() self.assertTrue(len(accelerator._models ) == 0 ) self.assertTrue(len(accelerator._optimizers ) == 0 ) self.assertTrue(len(accelerator._schedulers ) == 0 ) self.assertTrue(len(accelerator._dataloaders ) == 0 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*a_ , **a_ ): pass with patch('''torch.cuda.set_device''' , a_ ), 
patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ): __snake_case : List[Any] = Accelerator() self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : Optional[int] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : Any = get_signature(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # make sure loaded weights match accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components() accelerator.prepare(a_ , a_ , a_ , a_ , a_ ) __snake_case : List[Any] = get_signature(a_ ) # saving hook def save_config(a_ , a_ , a_ ): __snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__} with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f: json.dump(a_ , a_ ) # loading hook def load_config(a_ , a_ ): with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f: __snake_case : Any = json.load(a_ ) __snake_case : List[str] = config['''class_name'''] __snake_case : str = accelerator.register_save_state_pre_hook(a_ ) __snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ ) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Any = '''random''' # make sure loaded weights match with hooks accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__ ) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(a_ ) # make sure random weights don't match with hooks removed load_random_weights(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 ) # random class name to verify correct one is loaded __snake_case : Union[str, Any] = '''random''' # make sure loaded weights match with hooks removed accelerator.load_state(a_ ) self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 ) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components() __snake_case : Union[str, Any] = None # This should work __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertTrue(dummy_obj is None ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : str = Accelerator() __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components() __snake_case : Optional[int] = [1, 2, 3] # This should work __snake_case , __snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare( a_ , a_ , a_ , a_ , a_ , a_ ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) self.assertEqual( getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , ) __snake_case : Optional[Any] = Accelerator() # This should work __snake_case : Any = accelerator.prepare(a_ ) @slow @require_bnb def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : Any = Accelerator() with init_empty_weights(): __snake_case : List[str] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : Union[str, Any] = infer_auto_device_map(a_ ) __snake_case : str = '''cpu''' __snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ ) # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Dict = accelerator.prepare(a_ ) @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM __snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU} with init_empty_weights(): __snake_case : Any = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) model.tie_weights() __snake_case : List[Any] = infer_auto_device_map(a_ ) __snake_case : Dict = 1 __snake_case : str = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Any = Accelerator() # This should not work and get value error with self.assertRaises(a_ ): __snake_case : Tuple = accelerator.prepare(a_ ) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' from transformers import AutoModelForCausalLM with init_empty_weights(): __snake_case : Dict = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , ) __snake_case : Tuple = infer_auto_device_map(a_ ) __snake_case : Tuple = 1 __snake_case : List[Any] = AutoModelForCausalLM.from_pretrained( '''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , ) __snake_case : Tuple = Accelerator() # This should work __snake_case : Dict = accelerator.prepare(a_ ) @require_cuda def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : List[Any] = torch.nn.Linear(10 , 10 ) __snake_case 
: List[str] = torch.optim.SGD(model.parameters() , lr=0.01 ) __snake_case : Optional[Any] = Accelerator(cpu=a_ ) __snake_case : str = accelerator.prepare(a_ )
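# Note: this test module is meant to be collected by pytest, e.g. (the path
# is illustrative): python -m pytest tests/test_accelerator.py -k save_load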
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform mapping the points `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg"))
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts1, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
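# Note: cv2.getAffineTransform expects exactly three (x, y) point pairs in
# float32 for source and destination; it returns the 2x3 matrix that
# cv2.warpAffine then applies to every pixel of the input image.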
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _A ( self ): '''simple docstring''' super().tearDown() gc.collect() def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( 'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger' __SCREAMING_SNAKE_CASE = jax.device_count() __SCREAMING_SNAKE_CASE = num_samples * [prompt] __SCREAMING_SNAKE_CASE = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE = replicate(_A ) __SCREAMING_SNAKE_CASE = shard(_A ) __SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 'stabilityai/stable-diffusion-2' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='scheduler' ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='bf16' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE = scheduler_params __SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger' __SCREAMING_SNAKE_CASE = jax.device_count() __SCREAMING_SNAKE_CASE = num_samples * [prompt] __SCREAMING_SNAKE_CASE = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE = replicate(_A ) __SCREAMING_SNAKE_CASE = shard(_A ) __SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
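# With the lazy structure above, users still write the usual import (sketch):
#   from transformers import SEWConfig, SEWModel
# The `modeling_sew` submodule is only imported on first attribute access,
# which keeps `import transformers` cheap when torch is not installed.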
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
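# Usage sketch: generic code can read `hidden_size` / `num_attention_heads`
# through the properties and `attribute_map` above, even though Pegasus
# stores `d_model` / `encoder_attention_heads` internally:
#   config = PegasusConfig(d_model=512)
#   assert config.hidden_size == 512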
'''simple docstring''' import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 UpperCamelCase__ = get_tests_dir('''fixtures/dummy-config.json''') class lowerCamelCase_ ( unittest.TestCase ): def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = 0 def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) ) def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = AutoConfig.from_pretrained('''bert-base-uncased''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = AutoConfig.for_model('''roberta''' ) self.assertIsInstance(_A , _A ) def lowercase_ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
UpperCAmelCase__ : str = os.path.join(_A , '''fake-roberta''' ) os.makedirs(_A , exist_ok=_A ) with open(os.path.join(_A , '''config.json''' ) , '''w''' ) as f: f.write(json.dumps({} ) ) UpperCAmelCase__ : str = AutoConfig.from_pretrained(_A ) self.assertEqual(type(_A ) , _A ) def lowercase_ ( self : List[Any] ): '''simple docstring''' try: AutoConfig.register('''custom''' , _A ) # Wrong model type will raise an error with self.assertRaises(_A ): AutoConfig.register('''model''' , _A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_A ): AutoConfig.register('''bert''' , _A ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCAmelCase__ : int = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_A ) UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained(_A ) self.assertIsInstance(_A , _A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def lowercase_ ( self : List[Any] ): '''simple docstring''' with self.assertRaisesRegex( _A , '''bert-base is not a local folder and is not a valid model identifier''' ): UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained('''bert-base''' ) def lowercase_ ( self : str ): '''simple docstring''' with self.assertRaisesRegex( _A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): UpperCAmelCase__ : int = AutoConfig.from_pretrained(_A , revision='''aaaaaa''' ) def lowercase_ ( self : Tuple ): '''simple docstring''' with self.assertRaisesRegex( _A , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ): UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' ) def lowercase_ ( self : Optional[int] ): '''simple docstring''' with self.assertRaises(_A ): UpperCAmelCase__ : Union[str, Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(_A ): UpperCAmelCase__ : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) UpperCAmelCase__ : int = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(_A ) UpperCAmelCase__ : str = AutoConfig.from_pretrained(_A , trust_remote_code=_A ) self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' ) def lowercase_ ( self : List[str] ): '''simple docstring''' class lowerCamelCase_ ( __a ): lowerCAmelCase__ = 'new-model' try: AutoConfig.register('''new-model''' , _A ) # If remote code is not set, the default is to use local UpperCAmelCase__ : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote code is disabled, we load the local one. 
UpperCAmelCase__ : List[str] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote is enabled, we load from the Hub UpperCAmelCase__ : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=_A ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = '''▁''' UpperCamelCase__ = {'''vocab_file''': '''sentencepiece.bpe.model'''} UpperCamelCase__ = { '''vocab_file''': { '''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''', } } UpperCamelCase__ = { '''facebook/xglm-564M''': 2_0_4_8, } class lowerCamelCase_ ( __a ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] def __init__( self : Optional[int] , _A : Optional[Any] , _A : Optional[Any]="<s>" , _A : List[str]="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Tuple="<unk>" , _A : List[str]="<pad>" , _A : Optional[Dict[str, Any]] = None , **_A : Union[str, Any] , ): '''simple docstring''' UpperCAmelCase__ : str = {} if sp_model_kwargs is None else sp_model_kwargs # Compatibility with the original tokenizer UpperCAmelCase__ : Optional[int] = 7 UpperCAmelCase__ : Optional[int] = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )] UpperCAmelCase__ : Tuple = kwargs.get('''additional_special_tokens''' , [] ) kwargs["additional_special_tokens"] += [ word for word in madeup_words if word not in kwargs["additional_special_tokens"] ] super().__init__( bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) UpperCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_A ) ) UpperCAmelCase__ : List[str] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCAmelCase__ : Any = 1 # Mimic fairseq token-to-id alignment for the first 4 token UpperCAmelCase__ : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} UpperCAmelCase__ : int = len(self.sp_model ) UpperCAmelCase__ : Optional[int] = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )} self.fairseq_tokens_to_ids.update(_A ) UpperCAmelCase__ : Tuple = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = self.__dict__.copy() UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self : Union[str, Any] , _A : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def lowercase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.sep_token_id] + token_ids_a UpperCAmelCase__ : Dict = [self.sep_token_id] return sep + token_ids_a + sep + sep + token_ids_a def lowercase_ ( self : str , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) def lowercase_ ( self : Any , _A : List[int] , _A : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [self.sep_token_id] if token_ids_a is None: return len(sep + token_ids_a ) * [0] return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0] @property def lowercase_ ( self : Any ): '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words def lowercase_ ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : str = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase_ ( self : Optional[Any] , _A : str ): '''simple docstring''' return self.sp_model.encode(_A , out_type=_A ) def lowercase_ ( self : List[str] , _A : List[Any] ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase__ : Union[str, Any] = self.sp_model.PieceToId(_A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase_ ( self : List[Any] , _A : str ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase_ ( self : int , _A : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = ''''''.join(_A ).replace(_A , ''' ''' ).strip() return out_string def lowercase_ ( self : Any , _A : str , _A : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase__ : 
List[str] = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , '''wb''' ) as fi: UpperCAmelCase__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class SCREAMING_SNAKE_CASE : """simple docstring""" @staticmethod def __A ( *__A: Any , **__A: List[str] ) -> Optional[int]: pass @is_pipeline_test @require_vision @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" A_ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def __A ( self: int , __A: str , __A: List[Any] , __A: Union[str, Any] ) -> Dict: _A = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) _A = [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] return object_detector, examples def __A ( self: Optional[Any] , __A: str , __A: Union[str, Any] ) -> Any: _A = object_detector(examples[0] , threshold=0.0 ) _A = len(__A ) self.assertGreater(__A , 0 ) self.assertEqual( __A , [ { '''score''': ANY(__A ), '''label''': ANY(__A ), '''box''': {'''xmin''': ANY(__A ), '''ymin''': ANY(__A ), '''xmax''': ANY(__A ), '''ymax''': ANY(__A )}, } for i in range(__A ) ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def __A ( self: Union[str, Any] ) -> Tuple: pass @require_torch def __A ( self: Optional[int] ) -> Union[str, Any]: _A = pipeline( '''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' ) _A = object_detector( '''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , ) self.assertEqual( nested_simplify(__A , decimals=4 ) , [ {'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_218, '''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, {'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}}, {'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, ] , ) _A = object_detector( [ { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], } ] , threshold=0.64 , ) self.assertEqual( nested_simplify(__A , decimals=4 ) , [ [ {'''score''': 0.7_235, '''label''': '''cat''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_218, 
'''label''': '''remote''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.7_184, '''label''': '''couch''', '''box''': {'''xmin''': 2_04, '''ymin''': 1_67, '''xmax''': 2_32, '''ymax''': 1_90}}, {'''score''': 0.6_748, '''label''': '''remote''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_656, '''label''': '''cat''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_614, '''label''': '''couch''', '''box''': {'''xmin''': 5_71, '''ymin''': 83, '''xmax''': 5_98, '''ymax''': 1_03}}, {'''score''': 0.6_456, '''label''': '''remote''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, {'''score''': 0.642, '''label''': '''remote''', '''box''': {'''xmin''': 67, '''ymin''': 2_74, '''xmax''': 93, '''ymax''': 2_97}}, {'''score''': 0.6_419, '''label''': '''cat''', '''box''': {'''xmin''': 4_94, '''ymin''': 1_05, '''xmax''': 5_21, '''ymax''': 1_27}}, ] ] , ) @require_torch @slow def __A ( self: List[str] ) -> int: _A = pipeline('''zero-shot-object-detection''' ) _A = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , ) self.assertEqual( nested_simplify(__A , decimals=4 ) , [ {'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ] , ) _A = object_detector( [ { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, { '''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''', '''candidate_labels''': ['''cat''', '''remote''', '''couch'''], }, ] , ) self.assertEqual( nested_simplify(__A , decimals=4 ) , [ [ {'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 3_71, '''ymax''': 1_87}}, {'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ], [ {'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, {'''score''': 0.1_474, '''label''': '''remote''', '''box''': {'''xmin''': 3_35, '''ymin''': 74, '''xmax''': 
3_71, '''ymax''': 1_87}}, {'''score''': 0.1_208, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_42, '''ymax''': 4_76}}, ], ] , ) @require_tf @unittest.skip('''Zero Shot Object Detection not implemented in TF''' ) def __A ( self: Optional[Any] ) -> int: pass @require_torch @slow def __A ( self: Dict ) -> List[Any]: _A = 0.2 _A = pipeline('''zero-shot-object-detection''' ) _A = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=__A , ) self.assertEqual( nested_simplify(__A , decimals=4 ) , [ {'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, {'''score''': 0.2_537, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 55, '''xmax''': 3_15, '''ymax''': 4_72}}, ] , ) @require_torch @slow def __A ( self: int ) -> Tuple: _A = 2 _A = pipeline('''zero-shot-object-detection''' ) _A = object_detector( '''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=__A , ) self.assertEqual( nested_simplify(__A , decimals=4 ) , [ {'''score''': 0.2_868, '''label''': '''cat''', '''box''': {'''xmin''': 3_24, '''ymin''': 20, '''xmax''': 6_40, '''ymax''': 3_73}}, {'''score''': 0.277, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 72, '''xmax''': 1_77, '''ymax''': 1_15}}, ] , )
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow info/warning logs

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
def cramers_rule_2x2(equation1: list, equation2: list) -> tuple:
    """
    Solve the system 'a1*x + b1*y = c1' and 'a2*x + b2*y = c2' with Cramer's
    rule. Each equation is given as the coefficient list [a, b, c].
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
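# Examples (values chosen for illustration):
#   cramers_rule_2x2([11, 2, 30], [1, 0, 4])  -> (4.0, -7.0)
#   cramers_rule_2x2([2, 3, 0], [5, 1, 0])    -> (0.0, 0.0)  # trivial solution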
from typing import Union

import fire
import torch
from tqdm import tqdm


def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved pytorch state dict to fp16, overwriting it unless save_path is given."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)


if __name__ == "__main__":
    fire.Fire(convert)
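# Example invocation via python-fire (file name and paths are illustrative):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path overwrites the source checkpoint in place.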
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
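# Usage sketch for the composite-config pattern above (values illustrative):
#   vision = GitVisionConfig(image_size=224, patch_size=16)
#   config = GitConfig(vision_config=vision.to_dict(), num_hidden_layers=6)
#   assert config.vision_config.image_size == 224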
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. 's3://') from a dataset path, if any."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear the reference to fsspec's event loop and IO thread."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
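# Example (sketch) of the path helper above:
#   extract_path_from_uri("s3://my-bucket/datasets/train")  -> "my-bucket/datasets/train"
#   extract_path_from_uri("/local/path")                    -> "/local/path"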
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __magic_name__ = { "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" lowerCAmelCase__ = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" UpperCamelCase = set() # keep track of all the paths to be checked UpperCamelCase = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue UpperCamelCase = queue.pop(0 ) # get the last node from the path UpperCamelCase = path[-1] if node not in explored: UpperCamelCase = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: UpperCamelCase = list(_SCREAMING_SNAKE_CASE ) new_path.append(_SCREAMING_SNAKE_CASE ) queue.append(_SCREAMING_SNAKE_CASE ) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(_SCREAMING_SNAKE_CASE ) # in case there's no path between the 2 nodes return [] def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): """simple docstring""" if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 UpperCamelCase = [start] UpperCamelCase = set(_SCREAMING_SNAKE_CASE ) # Keep tab on distances from `start` node. UpperCamelCase = {start: 0, target: -1} while queue: UpperCamelCase = queue.pop(0 ) if node == target: UpperCamelCase = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node] ) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(_SCREAMING_SNAKE_CASE ) queue.append(_SCREAMING_SNAKE_CASE ) UpperCamelCase = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __UpperCAmelCase = logging.get_logger(__name__) def __UpperCamelCase ( lowercase__ : int ) -> List[List[ImageInput]]: '''simple docstring''' if isinstance(lowercase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowercase__ , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowercase__ ): return [[videos]] raise ValueError(f'Could not make batched video from {videos}' ) class __a ( __UpperCamelCase ): __snake_case : Union[str, Any] = ["""pixel_values"""] def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 2_55 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Tuple , ): super().__init__(**UpperCAmelCase ) lowerCAmelCase_ : Any = size if size is not None else {"""shortest_edge""": 2_24} lowerCAmelCase_ : Any = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24} lowerCAmelCase_ : Optional[int] = get_size_dict(UpperCAmelCase , param_name="""crop_size""" ) lowerCAmelCase_ : Any = do_resize lowerCAmelCase_ : Optional[int] = size lowerCAmelCase_ : int = do_center_crop lowerCAmelCase_ : int = crop_size lowerCAmelCase_ : Optional[Any] = resample lowerCAmelCase_ : List[Any] = do_rescale lowerCAmelCase_ : int = rescale_factor lowerCAmelCase_ : Any = do_normalize lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD def A ( self : List[str] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ): lowerCAmelCase_ : str = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) if "shortest_edge" in size: lowerCAmelCase_ : Tuple = get_resize_output_image_size(UpperCAmelCase , size["""shortest_edge"""] , default_to_square=UpperCAmelCase ) elif "height" in size and "width" in size: lowerCAmelCase_ : Tuple = (size["""height"""], size["""width"""]) else: raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. 
Got {size.keys()}' ) return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def A ( self : List[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : str , ): lowerCAmelCase_ : List[str] = get_size_dict(UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' ) return center_crop(UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase , **UpperCAmelCase ) def A ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ): return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def A ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ): return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase ) def A ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ): if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. 
lowerCAmelCase_ : List[str] = to_numpy_array(UpperCAmelCase ) if do_resize: lowerCAmelCase_ : str = self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) if do_center_crop: lowerCAmelCase_ : str = self.center_crop(UpperCAmelCase , size=UpperCAmelCase ) if do_rescale: lowerCAmelCase_ : List[Any] = self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) if do_normalize: lowerCAmelCase_ : str = self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) lowerCAmelCase_ : Dict = to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) return image def A ( self : List[Any] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Dict , ): lowerCAmelCase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase_ : str = resample if resample is not None else self.resample lowerCAmelCase_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase_ : List[str] = image_mean if image_mean is not None else self.image_mean lowerCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std lowerCAmelCase_ : Union[str, Any] = size if size is not None else self.size lowerCAmelCase_ : Union[str, Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase ) lowerCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size lowerCAmelCase_ : Any = get_size_dict(UpperCAmelCase , param_name="""crop_size""" ) if not valid_images(UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) lowerCAmelCase_ : str = make_batched(UpperCAmelCase ) lowerCAmelCase_ : List[Any] = [ [ self._preprocess_image( image=UpperCAmelCase , do_resize=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , do_center_crop=UpperCAmelCase , crop_size=UpperCAmelCase , do_rescale=UpperCAmelCase , rescale_factor=UpperCAmelCase , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , data_format=UpperCAmelCase , ) for img in video ] for video in videos ] lowerCAmelCase_ : Optional[int] = {"""pixel_values""": videos} return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
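# Editor's example: a usage sketch for the video image processor above. The class name
# is assumed to be the upstream `VideoMAEImageProcessor` that this file mirrors (same
# defaults: shortest_edge 224, 224x224 crop, ImageNet standard mean/std). The
# eight-frame "video" is random data, purely illustrative.
import numpy as np
from transformers import VideoMAEImageProcessor

processor = VideoMAEImageProcessor(
    size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224}
)
# A video is a list of frames; here, eight random 256x320 RGB frames.
video = [np.random.randint(0, 256, (256, 320, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224)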
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    """Wraps a BLIP image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwarded to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
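# Editor's example: the processor bundles image features and tokenized text in one
# call. The checkpoint name is an illustrative public one; any BLIP checkpoint works.
import numpy as np
from PIL import Image
from transformers import BlipProcessor

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))  # dummy black image
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']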
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available __UpperCamelCase = { '''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ErnieForCausalLM''', '''ErnieForMaskedLM''', '''ErnieForMultipleChoice''', '''ErnieForNextSentencePrediction''', '''ErnieForPreTraining''', '''ErnieForQuestionAnswering''', '''ErnieForSequenceClassification''', '''ErnieForTokenClassification''', '''ErnieModel''', '''ErniePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES A__: List[Any] = logging.get_logger(__name__) A__: Any = OrderedDict( [ # Base model mapping ('''albert''', '''FlaxAlbertModel'''), ('''bart''', '''FlaxBartModel'''), ('''beit''', '''FlaxBeitModel'''), ('''bert''', '''FlaxBertModel'''), ('''big_bird''', '''FlaxBigBirdModel'''), ('''blenderbot''', '''FlaxBlenderbotModel'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''), ('''clip''', '''FlaxCLIPModel'''), ('''distilbert''', '''FlaxDistilBertModel'''), ('''electra''', '''FlaxElectraModel'''), ('''gpt-sw3''', '''FlaxGPT2Model'''), ('''gpt2''', '''FlaxGPT2Model'''), ('''gpt_neo''', '''FlaxGPTNeoModel'''), ('''gptj''', '''FlaxGPTJModel'''), ('''longt5''', '''FlaxLongT5Model'''), ('''marian''', '''FlaxMarianModel'''), ('''mbart''', '''FlaxMBartModel'''), ('''mt5''', '''FlaxMT5Model'''), ('''opt''', '''FlaxOPTModel'''), ('''pegasus''', '''FlaxPegasusModel'''), ('''regnet''', '''FlaxRegNetModel'''), ('''resnet''', '''FlaxResNetModel'''), ('''roberta''', '''FlaxRobertaModel'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''), ('''roformer''', '''FlaxRoFormerModel'''), ('''t5''', '''FlaxT5Model'''), ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''), ('''vit''', '''FlaxViTModel'''), ('''wav2vec2''', '''FlaxWav2Vec2Model'''), ('''whisper''', '''FlaxWhisperModel'''), ('''xglm''', '''FlaxXGLMModel'''), ('''xlm-roberta''', '''FlaxXLMRobertaModel'''), ] ) A__: Dict = OrderedDict( [ # Model for pre-training mapping ('''albert''', '''FlaxAlbertForPreTraining'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForPreTraining'''), ('''big_bird''', '''FlaxBigBirdForPreTraining'''), ('''electra''', '''FlaxElectraForPreTraining'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) A__: Optional[int] = OrderedDict( [ # Model for Masked LM mapping ('''albert''', '''FlaxAlbertForMaskedLM'''), ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''bert''', '''FlaxBertForMaskedLM'''), ('''big_bird''', '''FlaxBigBirdForMaskedLM'''), ('''distilbert''', '''FlaxDistilBertForMaskedLM'''), ('''electra''', '''FlaxElectraForMaskedLM'''), ('''mbart''', '''FlaxMBartForConditionalGeneration'''), ('''roberta''', '''FlaxRobertaForMaskedLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''), ('''roformer''', '''FlaxRoFormerForMaskedLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''), ] ) A__: Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ('''bart''', '''FlaxBartForConditionalGeneration'''), ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''), ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''), ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''), ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''), ('''marian''', '''FlaxMarianMTModel'''), ('''mbart''', 
'''FlaxMBartForConditionalGeneration'''), ('''mt5''', '''FlaxMT5ForConditionalGeneration'''), ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''), ('''t5''', '''FlaxT5ForConditionalGeneration'''), ] ) A__: Optional[Any] = OrderedDict( [ # Model for Image-classsification ('''beit''', '''FlaxBeitForImageClassification'''), ('''regnet''', '''FlaxRegNetForImageClassification'''), ('''resnet''', '''FlaxResNetForImageClassification'''), ('''vit''', '''FlaxViTForImageClassification'''), ] ) A__: List[Any] = OrderedDict( [ ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''), ] ) A__: int = OrderedDict( [ # Model for Causal LM mapping ('''bart''', '''FlaxBartForCausalLM'''), ('''bert''', '''FlaxBertForCausalLM'''), ('''big_bird''', '''FlaxBigBirdForCausalLM'''), ('''electra''', '''FlaxElectraForCausalLM'''), ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''), ('''gpt2''', '''FlaxGPT2LMHeadModel'''), ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''), ('''gptj''', '''FlaxGPTJForCausalLM'''), ('''opt''', '''FlaxOPTForCausalLM'''), ('''roberta''', '''FlaxRobertaForCausalLM'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''), ('''xglm''', '''FlaxXGLMForCausalLM'''), ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''), ] ) A__: Optional[Any] = OrderedDict( [ # Model for Sequence Classification mapping ('''albert''', '''FlaxAlbertForSequenceClassification'''), ('''bart''', '''FlaxBartForSequenceClassification'''), ('''bert''', '''FlaxBertForSequenceClassification'''), ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''), ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''), ('''electra''', '''FlaxElectraForSequenceClassification'''), ('''mbart''', '''FlaxMBartForSequenceClassification'''), ('''roberta''', '''FlaxRobertaForSequenceClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''), ('''roformer''', '''FlaxRoFormerForSequenceClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''), ] ) A__: Optional[Any] = OrderedDict( [ # Model for Question Answering mapping ('''albert''', '''FlaxAlbertForQuestionAnswering'''), ('''bart''', '''FlaxBartForQuestionAnswering'''), ('''bert''', '''FlaxBertForQuestionAnswering'''), ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''), ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''), ('''electra''', '''FlaxElectraForQuestionAnswering'''), ('''mbart''', '''FlaxMBartForQuestionAnswering'''), ('''roberta''', '''FlaxRobertaForQuestionAnswering'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''), ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''), ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''), ] ) A__: List[Any] = OrderedDict( [ # Model for Token Classification mapping ('''albert''', '''FlaxAlbertForTokenClassification'''), ('''bert''', '''FlaxBertForTokenClassification'''), ('''big_bird''', '''FlaxBigBirdForTokenClassification'''), ('''distilbert''', '''FlaxDistilBertForTokenClassification'''), ('''electra''', '''FlaxElectraForTokenClassification'''), ('''roberta''', '''FlaxRobertaForTokenClassification'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''), ('''roformer''', '''FlaxRoFormerForTokenClassification'''), ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''), ] ) A__: Optional[int] = OrderedDict( [ # Model for Multiple Choice mapping ('''albert''', '''FlaxAlbertForMultipleChoice'''), ('''bert''', 
'''FlaxBertForMultipleChoice'''), ('''big_bird''', '''FlaxBigBirdForMultipleChoice'''), ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''), ('''electra''', '''FlaxElectraForMultipleChoice'''), ('''roberta''', '''FlaxRobertaForMultipleChoice'''), ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''), ('''roformer''', '''FlaxRoFormerForMultipleChoice'''), ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''), ] ) A__: Optional[Any] = OrderedDict( [ ('''bert''', '''FlaxBertForNextSentencePrediction'''), ] ) A__: Dict = OrderedDict( [ ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''), ('''whisper''', '''FlaxWhisperForConditionalGeneration'''), ] ) A__: Dict = OrderedDict( [ ('''whisper''', '''FlaxWhisperForAudioClassification'''), ] ) A__: Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) A__: List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) A__: str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) A__: int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) A__: str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) A__: List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) A__: List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) A__: int = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) A__: str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) A__: Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) A__: Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) A__: Optional[int] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) A__: str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) A__: Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_MAPPING A__: int = auto_class_update(FlaxAutoModel) class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_PRETRAINING_MAPPING A__: Dict = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''') class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING A__: Any = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''') class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_MASKED_LM_MAPPING A__: List[Any] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''') class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING A__: int = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base''' ) class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A__: Tuple = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc='''sequence classification''' ) class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = 
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING A__: Optional[Any] = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''') class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A__: str = auto_class_update( FlaxAutoModelForTokenClassification, head_doc='''token classification''' ) class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING A__: List[Any] = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''') class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING A__: Any = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction''' ) class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING A__: Dict = auto_class_update( FlaxAutoModelForImageClassification, head_doc='''image classification''' ) class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING A__: Dict = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''') class _a ( _BaseAutoModelClass): """simple docstring""" UpperCamelCase__ = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING A__: List[Any] = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling''' )
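# Editor's example: these name mappings are what let the auto classes resolve a config
# type to the matching Flax model class. A sketch (assumes `flax` is installed and
# that the example checkpoint ships Flax weights):
from transformers import FlaxAutoModelForSequenceClassification

# "bert" in the mapping above routes BertConfig -> FlaxBertForSequenceClassification.
model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
print(type(model).__name__)  # FlaxBertForSequenceClassification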
"""simple docstring""" import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets a__ : Any = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' a__ : Dict = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' a__ : Optional[int] = r''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCamelCase_ ( datasets.Metric): """simple docstring""" def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , ) def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] ) -> Any: __SCREAMING_SNAKE_CASE = 0.0 for i, j in zip(UpperCAmelCase__ , UpperCAmelCase__ ): n_correct += 1.0 if math_equivalence.is_equiv(UpperCAmelCase__ , UpperCAmelCase__ ) else 0.0 __SCREAMING_SNAKE_CASE = n_correct / len(UpperCAmelCase__ ) return { "accuracy": accuracy, }
"""simple docstring""" a__ : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ''' def UpperCAmelCase__ (): '''simple docstring''' __SCREAMING_SNAKE_CASE = input("Enter message: " ) __SCREAMING_SNAKE_CASE = input("Enter key [alphanumeric]: " ) __SCREAMING_SNAKE_CASE = input("Encrypt/Decrypt [e/d]: " ) if mode.lower().startswith("e" ): __SCREAMING_SNAKE_CASE = "encrypt" __SCREAMING_SNAKE_CASE = encrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) elif mode.lower().startswith("d" ): __SCREAMING_SNAKE_CASE = "decrypt" __SCREAMING_SNAKE_CASE = decrypt_message(lowerCAmelCase_ , lowerCAmelCase_ ) print(f"""\n{mode.title()}ed message:""" ) print(lowerCAmelCase_ ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "encrypt" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "decrypt" ) def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = key.upper() for symbol in message: __SCREAMING_SNAKE_CASE = LETTERS.find(symbol.upper() ) if num != -1: if mode == "encrypt": num += LETTERS.find(key[key_index] ) elif mode == "decrypt": num -= LETTERS.find(key[key_index] ) num %= len(lowerCAmelCase_ ) if symbol.isupper(): translated.append(LETTERS[num] ) elif symbol.islower(): translated.append(LETTERS[num].lower() ) key_index += 1 if key_index == len(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = 0 else: translated.append(lowerCAmelCase_ ) return "".join(lowerCAmelCase_ ) if __name__ == "__main__": main()
'''simple docstring''' import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version __A = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") __A = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeqaSeqLM, "translation": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization __A = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } __A = sorted(arg_to_scheduler.keys()) __A = "{" + ", ".join(arg_to_scheduler_choices) + "}" class A ( pl.LightningModule ): def __init__( self , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__="base" , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=None , **lowerCamelCase__ , ) -> Dict: '''simple docstring''' super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(lowerCamelCase__ ) lowercase__ = 0 lowercase__ = Path(self.hparams.output_dir ) lowercase__ = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: lowercase__ = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"""num_labels""": num_labels} if num_labels is not None else {}) , cache_dir=lowerCamelCase__ , **lowerCamelCase__ , ) else: lowercase__ = config lowercase__ = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(self.hparams , lowerCamelCase__ , lowerCamelCase__ ): assert hasattr(self.config , lowerCamelCase__ ), F'''model config doesn\'t have a `{p}` attribute''' setattr(self.config , lowerCamelCase__ , getattr(self.hparams , lowerCamelCase__ ) ) if tokenizer is None: lowercase__ = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowerCamelCase__ , ) else: lowercase__ = tokenizer lowercase__ = MODEL_MODES[mode] if model is None: lowercase__ = self.model_type.from_pretrained( self.hparams.model_name_or_path , from_tf=bool(""".ckpt""" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=lowerCamelCase__ , ) else: lowercase__ = model def A__ ( self , *lowerCamelCase__ , 
**lowerCamelCase__ ) -> int: '''simple docstring''' lowercase__ = self.model_type.from_pretrained(*lowerCamelCase__ , **lowerCamelCase__ ) def A__ ( self ) -> str: '''simple docstring''' lowercase__ = arg_to_scheduler[self.hparams.lr_scheduler] lowercase__ = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) lowercase__ = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1} return scheduler def A__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase__ = self.model lowercase__ = ["""bias""", """LayerNorm.weight"""] lowercase__ = [ { """params""": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters """weight_decay""": self.hparams.weight_decay, }, { """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] if self.hparams.adafactor: lowercase__ = Adafactor( lowerCamelCase__ , lr=self.hparams.learning_rate , scale_parameter=lowerCamelCase__ , relative_step=lowerCamelCase__ ) else: lowercase__ = AdamW( lowerCamelCase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) lowercase__ = optimizer lowercase__ = self.get_lr_scheduler() return [optimizer], [scheduler] def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' return self.validation_step(lowerCamelCase__ , lowerCamelCase__ ) def A__ ( self , lowerCamelCase__ ) -> List[Any]: '''simple docstring''' return self.validation_end(lowerCamelCase__ ) def A__ ( self ) -> int: '''simple docstring''' lowercase__ = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores lowercase__ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def A__ ( self , lowerCamelCase__ ) -> Dict: '''simple docstring''' if stage == "test": lowercase__ = len(self.test_dataloader().dataset ) else: lowercase__ = self.get_dataloader("""train""" , self.hparams.train_batch_size , shuffle=lowerCamelCase__ ) lowercase__ = len(self.train_dataloader().dataset ) def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ) -> List[Any]: '''simple docstring''' raise NotImplementedError("""You must implement this for your task""" ) def A__ ( self ) -> str: '''simple docstring''' return self.train_loader def A__ ( self ) -> Tuple: '''simple docstring''' return self.get_dataloader("""dev""" , self.hparams.eval_batch_size , shuffle=lowerCamelCase__ ) def A__ ( self ) -> Optional[Any]: '''simple docstring''' return self.get_dataloader("""test""" , self.hparams.eval_batch_size , shuffle=lowerCamelCase__ ) def A__ ( self , lowerCamelCase__ ) -> int: '''simple docstring''' return os.path.join( self.hparams.data_dir , """cached_{}_{}_{}""".format( lowerCamelCase__ , list(filter(lowerCamelCase__ , self.hparams.model_name_or_path.split("""/""" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def A__ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' lowercase__ = self.output_dir.joinpath("""best_tfmr""" ) lowercase__ = self.step_count self.model.save_pretrained(lowerCamelCase__ ) self.tokenizer.save_pretrained(lowerCamelCase__ ) @staticmethod def A__ ( lowerCamelCase__ , lowerCamelCase__ ) -> str: '''simple docstring''' parser.add_argument( """--model_name_or_path""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , 
help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--config_name""" , default="""""" , type=lowerCamelCase__ , help="""Pretrained config name or path if not the same as model_name""" ) parser.add_argument( """--tokenizer_name""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument( """--cache_dir""" , default=str(Path(lowerCamelCase__ ).parent / """test_run""" / """cache""" ) , type=lowerCamelCase__ , help="""Where do you want to store the pre-trained models downloaded from huggingface.co""" , ) parser.add_argument( """--encoder_layerdrop""" , type=lowerCamelCase__ , help="""Encoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--decoder_layerdrop""" , type=lowerCamelCase__ , help="""Decoder layer dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--dropout""" , type=lowerCamelCase__ , help="""Dropout probability (Optional). Goes into model.config""" , ) parser.add_argument( """--attention_dropout""" , type=lowerCamelCase__ , help="""Attention dropout probability (Optional). Goes into model.config""" , ) parser.add_argument("""--learning_rate""" , default=5e-5 , type=lowerCamelCase__ , help="""The initial learning rate for Adam.""" ) parser.add_argument( """--lr_scheduler""" , default="""linear""" , choices=lowerCamelCase__ , metavar=lowerCamelCase__ , type=lowerCamelCase__ , help="""Learning rate scheduler""" , ) parser.add_argument("""--weight_decay""" , default=0.0 , type=lowerCamelCase__ , help="""Weight decay if we apply some.""" ) parser.add_argument("""--adam_epsilon""" , default=1e-8 , type=lowerCamelCase__ , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--warmup_steps""" , default=0 , type=lowerCamelCase__ , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--num_workers""" , default=4 , type=lowerCamelCase__ , help="""kwarg passed to DataLoader""" ) parser.add_argument("""--num_train_epochs""" , dest="""max_epochs""" , default=3 , type=lowerCamelCase__ ) parser.add_argument("""--train_batch_size""" , default=32 , type=lowerCamelCase__ ) parser.add_argument("""--eval_batch_size""" , default=32 , type=lowerCamelCase__ ) parser.add_argument("""--adafactor""" , action="""store_true""" ) class A ( pl.Callback ): def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple: '''simple docstring''' if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class A ( pl.Callback ): def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]: '''simple docstring''' for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(lowerCamelCase__ ) class A ( pl.Callback ): def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' lowercase__ = trainer.lr_schedulers[0]["""scheduler"""] lowercase__ = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(lowerCamelCase__ ) def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int: '''simple docstring''' rank_zero_info("""***** Validation results *****""" ) lowercase__ = trainer.callback_metrics # Log results for key in sorted(lowerCamelCase__ ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(lowerCamelCase__ , str(metrics[key] ) ) ) def A__ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any: '''simple docstring''' rank_zero_info("""***** Test results *****""" ) lowercase__ = trainer.callback_metrics # Log and save results to file lowercase__ = os.path.join(pl_module.hparams.output_dir , """test_results.txt""" ) with open(lowerCamelCase__ , """w""" ) as writer: for key in sorted(lowerCamelCase__ ): if key not in ["log", "progress_bar"]: rank_zero_info("""{} = {}\n""".format(lowerCamelCase__ , str(metrics[key] ) ) ) writer.write("""{} = {}\n""".format(lowerCamelCase__ , str(metrics[key] ) ) ) def _A ( lowercase__ , lowercase__ ): # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( """--output_dir""" , default=str(Path(lowercase__ ).parent / """test_run""" / """model_checkpoints""" ) , type=lowercase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument( """--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , ) parser.add_argument( """--fp16_opt_level""" , type=lowercase__ , default="""O2""" , help=( """For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].""" """See details at https://nvidia.github.io/apex/amp.html""" ) , ) parser.add_argument("""--n_tpu_cores""" , dest="""tpu_cores""" , type=lowercase__ ) parser.add_argument("""--max_grad_norm""" , dest="""gradient_clip_val""" , default=1.0 , type=lowercase__ , help="""Max gradient norm""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_predict""" , action="""store_true""" , help="""Whether to run predictions on the test set.""" ) parser.add_argument( """--gradient_accumulation_steps""" , dest="""accumulate_grad_batches""" , type=lowercase__ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--seed""" , type=lowercase__ , default=42 , help="""random seed for initialization""" ) parser.add_argument( """--data_dir""" , default=str(Path(lowercase__ ).parent / """test_run""" / """dummy-train-data""" ) , type=lowercase__ , help="""The input data dir. 
Should contain the training files for the CoNLL-2003 NER task.""" , ) def _A ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=True , lowercase__=[] , lowercase__=None , lowercase__=None , **lowercase__ , ): pl.seed_everything(args.seed ) # init model lowercase__ = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=lowercase__ ) # add custom checkpoints if checkpoint_callback is None: lowercase__ = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="""checkpoint""" , monitor="""val_loss""" , mode="""min""" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(lowercase__ ) if logging_callback is None: lowercase__ = LoggingCallback() lowercase__ = {} if args.fpaa: lowercase__ = 16 if args.gpus > 1: lowercase__ = """auto""" lowercase__ = """ddp""" lowercase__ = args.accumulate_grad_batches lowercase__ = None lowercase__ = """auto""" lowercase__ = pl.Trainer.from_argparse_args( lowercase__ , weights_summary=lowercase__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowercase__ , val_check_interval=1 , num_sanity_val_steps=2 , **lowercase__ , ) if args.do_train: trainer.fit(lowercase__ ) else: print("""RAG modeling tests with new set functions successfuly executed!""" ) return trainer
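# Editor's sketch: a typical driver for the module above. Both top-level helpers are
# collapsed to `_A` in this dump; upstream they are `add_generic_args` and
# `generic_train`, which this sketch assumes. `MyTaskModule` is hypothetical: a
# subclass of the LightningModule above that implements `get_dataloader`.
import argparse
import os

parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())                      # trainer-level flags (--output_dir, --fp16, ...)
MyTaskModule.add_model_specific_args(parser, os.getcwd())  # model/optimizer flags
args = parser.parse_args()

model = MyTaskModule(args)  # hypothetical task module
trainer = generic_train(model, args)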
"""Counting Sundays (Project Euler problem 19): how many Sundays fell on the first
of the month during the twentieth century (1 Jan 1901 to 31 Dec 2000)?"""


def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
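# Editor's cross-check: the same count obtained directly with the standard library.
from datetime import date


def solution_datetime() -> int:
    """Count the first-of-month Sundays in 1901-2000 via `datetime`."""
    return sum(
        date(year, month, 1).weekday() == 6  # Monday == 0 ... Sunday == 6
        for year in range(1901, 2001)
        for month in range(1, 13)
    )


print(solution_datetime())  # 171, matching solution()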
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import pyarrow as pa

if TYPE_CHECKING:
    from .features import FeatureType


@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example."""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable set of languages per example."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) '
                f'are not in valid set ({", ".join(lang_set)}).'
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the TranslationVariableLanguages feature into a dictionary."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
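# Editor's example: what `encode_example` produces for a mixed dictionary (one
# language with a single string, another with a list of alternative translations).
feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
example = feature.encode_example({"en": "the cat", "fr": ["le chat", "la chatte"]})
print(example)
# {'language': ('en', 'fr', 'fr'), 'translation': ('the cat', 'la chatte', 'le chat')}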
'''simple docstring''' import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def lowerCamelCase__ ( A : str="" ): '''simple docstring''' UpperCAmelCase = tempfile.mkdtemp() return os.path.join(A , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class UpperCamelCase__( unittest.TestCase ): def a__( self : int )-> int: """simple docstring""" UpperCAmelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5 UpperCAmelCase = AgentAudio(lowerCAmelCase ) UpperCAmelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(lowerCAmelCase ) ) # Ensure that the file contains the same value as the original tensor UpperCAmelCase , UpperCAmelCase = sf.read(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , torch.tensor(lowerCAmelCase ) , atol=1E-4 ) ) def a__( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" UpperCAmelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5 UpperCAmelCase = get_new_path(suffix='''.wav''' ) sf.write(lowerCAmelCase , lowerCAmelCase , 16000 ) UpperCAmelCase = AgentAudio(lowerCAmelCase ) self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) ) self.assertEqual(agent_type.to_string() , lowerCAmelCase ) @require_vision @require_torch class UpperCamelCase__( unittest.TestCase ): def a__( self : List[Any] )-> Any: """simple docstring""" UpperCAmelCase = torch.randint(0 , 256 , (64, 64, 3) ) UpperCAmelCase = AgentImage(lowerCAmelCase ) UpperCAmelCase = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(lowerCAmelCase , agent_type._tensor , atol=1E-4 ) ) self.assertIsInstance(agent_type.to_raw() , Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCAmelCase ) ) def a__( self : List[Any] )-> List[Any]: """simple docstring""" UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' UpperCAmelCase = Image.open(lowerCAmelCase ) UpperCAmelCase = AgentImage(lowerCAmelCase ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCAmelCase ) ) def a__( self : Optional[Any] )-> List[str]: """simple docstring""" UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' UpperCAmelCase = Image.open(lowerCAmelCase ) UpperCAmelCase = AgentImage(lowerCAmelCase ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(lowerCAmelCase ) ) class UpperCamelCase__( unittest.TestCase ): def a__( self : int )-> Any: """simple docstring""" UpperCAmelCase = 
'''Hey!''' UpperCAmelCase = AgentText(lowerCAmelCase ) self.assertEqual(lowerCAmelCase , agent_type.to_string() ) self.assertEqual(lowerCAmelCase , agent_type.to_raw() ) self.assertEqual(lowerCAmelCase , lowerCAmelCase )
'''simple docstring''' import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __snake_case ( UpperCAmelCase_ : Dict ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] ): lowerCamelCase_ = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue lowerCamelCase_ = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) lowerCamelCase_ = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) lowerCamelCase_ = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) lowerCamelCase_ = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) lowerCamelCase_ = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) lowerCamelCase_ = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) lowerCamelCase_ = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) lowerCamelCase_ = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) lowerCamelCase_ = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) lowerCamelCase_ = key.replace("image_encoder.module" , "flava.image_model" ) lowerCamelCase_ = key.replace("text_encoder.module" , "flava.text_model" ) lowerCamelCase_ = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) lowerCamelCase_ = key.replace("mm_encoder.module" , "flava.multimodal_model" ) lowerCamelCase_ = key.replace("text_projection" , "flava.text_projection" ) lowerCamelCase_ = key.replace("image_projection" , "flava.image_projection" ) lowerCamelCase_ = value.float() for key, value in codebook_state_dict.items(): lowerCamelCase_ = value return upgrade @torch.no_grad() def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict=None ): if config_path is not None: lowerCamelCase_ = FlavaConfig.from_pretrained(UpperCAmelCase_ ) else: lowerCamelCase_ = FlavaConfig() lowerCamelCase_ = FlavaForPreTraining(UpperCAmelCase_ ).eval() lowerCamelCase_ = convert_dalle_checkpoint(UpperCAmelCase_ , UpperCAmelCase_ , save_checkpoint=UpperCAmelCase_ ) if os.path.exists(UpperCAmelCase_ ): lowerCamelCase_ = torch.load(UpperCAmelCase_ , map_location="cpu" ) else: lowerCamelCase_ = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location="cpu" ) lowerCamelCase_ = upgrade_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) hf_model.load_state_dict(UpperCAmelCase_ ) lowerCamelCase_ = hf_model.state_dict() lowerCamelCase_ = count_parameters(UpperCAmelCase_ ) lowerCamelCase_ = count_parameters(UpperCAmelCase_ ) + count_parameters(UpperCAmelCase_ ) assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) hf_model.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": a_ : Optional[int] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", 
default=None, type=str, help="""Path to hf config.json of model to convert""") a_ : Union[str, Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
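# Editor's note: the conversion can also be driven from Python. The entry point is
# collapsed to `__snake_case` in this dump; upstream it is `convert_flava_checkpoint`,
# as the call in the __main__ block suggests. Paths below are placeholders.
convert_flava_checkpoint(
    "./flava_pretraining.pt",  # checkpoint_path (placeholder)
    "./flava_codebook.pt",     # codebook_path (placeholder)
    "./flava-converted",       # pytorch_dump_folder_path
    None,                      # config_path: None falls back to a default FlavaConfig
)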
'''simple docstring''' from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging A_ = logging.get_logger(__name__) # pylint: disable=invalid-name class _snake_case ( _a ): def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : CLIPSegForImageSegmentation ,SCREAMING_SNAKE_CASE__ : CLIPSegProcessor ,SCREAMING_SNAKE_CASE__ : AutoencoderKL ,SCREAMING_SNAKE_CASE__ : CLIPTextModel ,SCREAMING_SNAKE_CASE__ : CLIPTokenizer ,SCREAMING_SNAKE_CASE__ : UNetaDConditionModel ,SCREAMING_SNAKE_CASE__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,SCREAMING_SNAKE_CASE__ : StableDiffusionSafetyChecker ,SCREAMING_SNAKE_CASE__ : CLIPImageProcessor ,): super().__init__() if hasattr(scheduler.config ,"steps_offset" ) and scheduler.config.steps_offset != 1: SCREAMING_SNAKE_CASE:Union[str, Any] = ( F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`''' F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ''' "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1" ,"1.0.0" ,SCREAMING_SNAKE_CASE__ ,standard_warn=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Tuple = dict(scheduler.config ) SCREAMING_SNAKE_CASE:Union[str, Any] = 1 SCREAMING_SNAKE_CASE:Dict = FrozenDict(SCREAMING_SNAKE_CASE__ ) if hasattr(scheduler.config ,"skip_prk_steps" ) and scheduler.config.skip_prk_steps is False: SCREAMING_SNAKE_CASE:List[Any] = ( F'''The configuration file of this scheduler: {scheduler} has not set the configuration''' " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make" " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to" " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face" " Hub, it would be very nice if you could open a Pull request for the" " `scheduler/scheduler_config.json` file" ) deprecate("skip_prk_steps not set" ,"1.0.0" ,SCREAMING_SNAKE_CASE__ ,standard_warn=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Tuple = dict(scheduler.config ) SCREAMING_SNAKE_CASE:int = True SCREAMING_SNAKE_CASE:Optional[int] = FrozenDict(SCREAMING_SNAKE_CASE__ ) if safety_checker is None: logger.warning( F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. 
Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) self.register_modules( segmentation_model=SCREAMING_SNAKE_CASE__ ,segmentation_processor=SCREAMING_SNAKE_CASE__ ,vae=SCREAMING_SNAKE_CASE__ ,text_encoder=SCREAMING_SNAKE_CASE__ ,tokenizer=SCREAMING_SNAKE_CASE__ ,unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ,safety_checker=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__ ,) def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory SCREAMING_SNAKE_CASE:Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : str ): self.enable_attention_slicing(SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : List[str] ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) SCREAMING_SNAKE_CASE:str = torch.device("cuda" ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __UpperCamelCase ( self : Any ): if self.device != torch.device("meta" ) or not hasattr(self.unet ,"_hf_hook" ): return self.device for module in self.unet.modules(): if ( hasattr(SCREAMING_SNAKE_CASE__ ,"_hf_hook" ) and hasattr(module._hf_hook ,"execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, List[str]] ,SCREAMING_SNAKE_CASE__ : Union[torch.FloatTensor, PIL.Image.Image] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int = 512 ,SCREAMING_SNAKE_CASE__ : int = 512 ,SCREAMING_SNAKE_CASE__ : int = 50 ,SCREAMING_SNAKE_CASE__ : float = 7.5 ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, List[str]]] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,SCREAMING_SNAKE_CASE__ : int = 1 ,**SCREAMING_SNAKE_CASE__ : Dict ,): SCREAMING_SNAKE_CASE:str = self.segmentation_processor( text=[text] ,images=[image] ,padding="max_length" ,return_tensors="pt" ).to(self.device ) SCREAMING_SNAKE_CASE:Union[str, Any] = self.segmentation_model(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Optional[int] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() SCREAMING_SNAKE_CASE:Optional[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )[0].resize(image.size ) # Run inpainting pipeline with the generated mask SCREAMING_SNAKE_CASE:Any = StableDiffusionInpaintPipeline( vae=self.vae ,text_encoder=self.text_encoder ,tokenizer=self.tokenizer 
,unet=self.unet ,scheduler=self.scheduler ,safety_checker=self.safety_checker ,feature_extractor=self.feature_extractor ,) return inpainting_pipeline( prompt=SCREAMING_SNAKE_CASE__ ,image=SCREAMING_SNAKE_CASE__ ,mask_image=SCREAMING_SNAKE_CASE__ ,height=SCREAMING_SNAKE_CASE__ ,width=SCREAMING_SNAKE_CASE__ ,num_inference_steps=SCREAMING_SNAKE_CASE__ ,guidance_scale=SCREAMING_SNAKE_CASE__ ,negative_prompt=SCREAMING_SNAKE_CASE__ ,num_images_per_prompt=SCREAMING_SNAKE_CASE__ ,eta=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,latents=SCREAMING_SNAKE_CASE__ ,output_type=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,callback=SCREAMING_SNAKE_CASE__ ,callback_steps=SCREAMING_SNAKE_CASE__ ,)
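# A minimal usage sketch for the pipeline defined above: CLIPSeg turns the
# `text` query into a mask, and the mask is handed to
# StableDiffusionInpaintPipeline. The checkpoint ids, the community-pipeline
# name, the image URL, and the prompts are illustrative assumptions, not
# values taken from this file.
from io import BytesIO

import requests
from PIL import Image

from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

segmentation_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed id
segmentation_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed id

pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",  # assumed inpainting checkpoint
    custom_pipeline="text_inpainting",       # assumed community-pipeline name
    segmentation_model=segmentation_model,
    segmentation_processor=segmentation_processor,
).to("cuda")

url = "https://example.com/bench.png"  # placeholder image URL
image = Image.open(BytesIO(requests.get(url).content)).resize((512, 512))

result = pipe(
    prompt="a red couch",  # what to paint into the masked region
    image=image,
    text="a bench",        # what CLIPSeg should segment and mask out
).images[0]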
from __future__ import annotations

graph: dict[str, list[str]] = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in the resulting breadth-first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Explore the graph level by level, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first-in, first-out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent pointers back to the source and render the path."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no path from G to Foo
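# Design note on the queue above: list.pop(0) shifts every remaining element,
# so each dequeue costs O(n) and the traversal degrades on large graphs.
# collections.deque pops from the left in O(1); a self-contained sketch of the
# same parent-recording traversal using a deque:
from collections import deque


def bfs_parents(adjacency: dict[str, list[str]], source: str) -> dict[str, str | None]:
    parent: dict[str, str | None] = {source: None}
    queue = deque([source])
    while queue:
        vertex = queue.popleft()  # O(1), unlike list.pop(0)
        for neighbor in adjacency[vertex]:
            if neighbor not in parent:  # parent doubles as the visited set
                parent[neighbor] = vertex
                queue.append(neighbor)
    return parent


assert bfs_parents(graph, "G")["C"] == "G"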
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowercase__ ( UpperCamelCase_ , unittest.TestCase): UpperCamelCase_ = KandinskyInpaintPipeline UpperCamelCase_ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""] UpperCamelCase_ = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image""", ] UpperCamelCase_ = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] UpperCamelCase_ = False @property def __A ( self : Tuple ): '''simple docstring''' return 32 @property def __A ( self : List[str] ): '''simple docstring''' return 32 @property def __A ( self : List[Any] ): '''simple docstring''' return self.time_input_dim @property def __A ( self : List[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __A ( self : List[Any] ): '''simple docstring''' return 100 @property def __A ( self : Optional[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' ) return tokenizer @property def __A ( self : int ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[str] = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) SCREAMING_SNAKE_CASE : Any = MultilingualCLIP(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = text_encoder.eval() return text_encoder @property def __A ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = { '''in_channels''': 9, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''text_image''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''text_image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(**UpperCamelCase__ ) return model @property def __A ( self : int ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, 
"layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __A ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Union[str, Any] = VQModel(**self.dummy_movq_kwargs ) return model def __A ( self : str ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.dummy_text_encoder SCREAMING_SNAKE_CASE : Dict = self.dummy_tokenizer SCREAMING_SNAKE_CASE : List[str] = self.dummy_unet SCREAMING_SNAKE_CASE : int = self.dummy_movq SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=UpperCamelCase__ , ) SCREAMING_SNAKE_CASE : Any = { '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def __A ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase__ ) # create init_image SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) ) # create mask SCREAMING_SNAKE_CASE : Tuple = np.ones((64, 64) , dtype=np.floataa ) SCREAMING_SNAKE_CASE : List[Any] = 0 if str(UpperCamelCase__ ).startswith('''mps''' ): SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(UpperCamelCase__ ) else: SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Dict = { '''prompt''': '''horse''', '''image''': init_image, '''mask_image''': mask, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 2, '''guidance_scale''': 4.0, '''output_type''': '''np''', } return inputs def __A ( self : Tuple ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' SCREAMING_SNAKE_CASE : Any = self.get_dummy_components() SCREAMING_SNAKE_CASE : str = self.pipeline_class(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE : List[Any] = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.images SCREAMING_SNAKE_CASE : Any = pipe( **self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0] SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : int = image_from_tuple[0, -3:, -3:, -1] print(f"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : str = np.array( [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() 
< 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __A ( self : str ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class lowercase__ ( unittest.TestCase): def __A ( self : str ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __A ( self : Any ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' ) SCREAMING_SNAKE_CASE : Optional[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) SCREAMING_SNAKE_CASE : int = np.ones((768, 768) , dtype=np.floataa ) SCREAMING_SNAKE_CASE : Optional[int] = 0 SCREAMING_SNAKE_CASE : Optional[Any] = '''a hat''' SCREAMING_SNAKE_CASE : Dict = KandinskyPriorPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Optional[Any] = pipeline.to(UpperCamelCase__ ) pipeline.set_progress_bar_config(disable=UpperCamelCase__ ) SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = pipe_prior( UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple() SCREAMING_SNAKE_CASE : Optional[Any] = pipeline( UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , ) SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
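# A minimal end-to-end sketch of the two-stage flow the integration test above
# exercises: the prior pipeline turns text into image embeddings, and the
# inpaint pipeline consumes them together with an init image and a mask. The
# checkpoint ids and the prior's to_tuple() call come from the test; the image
# URL, the exact mask slice, and the mask convention (0 marks the region to
# repaint, matching the ones-initialised mask in the tests) are assumptions.
import numpy as np
import torch

from diffusers import KandinskyInpaintPipeline, KandinskyPriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyPriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyInpaintPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("https://example.com/cat.png")  # placeholder URL
mask = np.ones((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 0  # assumed: zeroed region is the area to repaint

image_embeds, negative_image_embeds = pipe_prior("a hat", negative_prompt="").to_tuple()
image = pipe(
    "a hat",
    image=init_image,
    mask_image=mask,
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    height=768,
    width=768,
    num_inference_steps=50,
).images[0]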
"""simple docstring""" import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): __lowerCAmelCase : Union[str, Any] = BertJapaneseTokenizer __lowerCAmelCase : Tuple = False __lowerCAmelCase : Optional[Any] = True def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' super().setUp() UpperCAmelCase : str = [ """[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは""", """世界""", """##世界""", """、""", """##、""", """。""", """##。""", ] UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' UpperCAmelCase : int = """こんにちは、世界。 \nこんばんは、世界。""" UpperCAmelCase : str = """こんにちは 、 世界 。 こんばんは 、 世界 。""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.get_input_output_texts(lowerCAmelCase_ ) UpperCAmelCase : Optional[Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase : str = tokenizer.decode(lowerCAmelCase_ , clean_up_tokenization_spaces=lowerCAmelCase_ ) return text, ids def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" ) self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" ) self.assertIsNotNone(lowerCAmelCase_ ) UpperCAmelCase : Optional[Any] = """こんにちは、世界。\nこんばんは、世界。""" UpperCAmelCase : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) UpperCAmelCase : Any = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(lowerCAmelCase_ , """wb""" ) as handle: pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ ) with open(lowerCAmelCase_ , """rb""" ) as handle: UpperCAmelCase : Optional[Any] = 
pickle.load(lowerCAmelCase_ ) UpperCAmelCase : List[Any] = tokenizer_new.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Any = MecabTokenizer(mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' try: UpperCAmelCase : Tuple = MecabTokenizer(mecab_dic="""unidic_lite""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' try: UpperCAmelCase : Dict = MecabTokenizer(mecab_dic="""unidic""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] = MecabTokenizer(do_lower_case=lowerCAmelCase_ , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' try: UpperCAmelCase : List[str] = MecabTokenizer( do_lower_case=lowerCAmelCase_ , normalize_text=lowerCAmelCase_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. 
return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] = MecabTokenizer(normalize_text=lowerCAmelCase_ , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" ) self.assertIsNotNone(lowerCAmelCase_ ) UpperCAmelCase : Any = """こんにちは、世界。\nこんばんは、世界。""" UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) UpperCAmelCase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(lowerCAmelCase_ , """wb""" ) as handle: pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ ) with open(lowerCAmelCase_ , """rb""" ) as handle: UpperCAmelCase : Optional[Any] = pickle.load(lowerCAmelCase_ ) UpperCAmelCase : List[str] = tokenizer_new.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : List[str] = SudachiTokenizer(sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Any = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Any = SudachiTokenizer(do_lower_case=lowerCAmelCase_ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase : List[str] = SudachiTokenizer(normalize_text=lowerCAmelCase_ , sudachi_dict_type="""core""" ) 
self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[int] = SudachiTokenizer(trim_whitespace=lowerCAmelCase_ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" ) self.assertIsNotNone(lowerCAmelCase_ ) UpperCAmelCase : Tuple = """こんにちは、世界。\nこんばんは、世界。""" UpperCAmelCase : Optional[int] = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) UpperCAmelCase : Any = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(lowerCAmelCase_ , """wb""" ) as handle: pickle.dump(lowerCAmelCase_ , lowerCAmelCase_ ) with open(lowerCAmelCase_ , """rb""" ) as handle: UpperCAmelCase : Dict = pickle.load(lowerCAmelCase_ ) UpperCAmelCase : int = tokenizer_new.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : Dict = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Dict = JumanppTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase : Any = JumanppTokenizer(normalize_text=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase : Optional[Any] = JumanppTokenizer(trim_whitespace=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : List[Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ 
_)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""] UpperCAmelCase : int = {} for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase : Union[str, Any] = i UpperCAmelCase : Any = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] ) self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] ) self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase : Dict = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" ) UpperCAmelCase : Any = tokenizer.subword_tokenizer UpperCAmelCase : List[str] = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] ) UpperCAmelCase : Optional[int] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" ) self.assertListEqual(lowerCAmelCase_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] ) def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" ) UpperCAmelCase : Optional[int] = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase : int = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): __lowerCAmelCase : Dict = BertJapaneseTokenizer __lowerCAmelCase : Optional[int] = False def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' super().setUp() UpperCAmelCase : Tuple = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""] UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **lowerCAmelCase_ ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[str] = """こんにちは、世界。 \nこんばんは、世界。""" UpperCAmelCase : List[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' pass # TODO add if 
relevant def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" ) UpperCAmelCase : int = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" ) self.assertListEqual( lowerCAmelCase_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""] UpperCAmelCase : Any = {} for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase : Optional[Any] = i UpperCAmelCase : Tuple = CharacterTokenizer(vocab=lowerCAmelCase_ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] ) self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] ) def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" ) UpperCAmelCase : int = tokenizer.encode("""ありがとう。""" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase : List[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Dict = """cl-tohoku/bert-base-japanese""" UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ ) class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : List[Any] = """cl-tohoku/bert-base-japanese""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""" ) ) UpperCAmelCase : Optional[Any] = """bert-base-cased""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertJapaneseTokenizer.from_pretrained(lowerCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""" ) )
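# A short usage sketch for the tokenizer exercised above: word-level
# segmentation (MeCab by default) followed by WordPiece. The checkpoint id
# comes from the tests; the input sentence is an illustrative assumption, and
# MeCab support typically needs extra packages (e.g. fugashi plus ipadic).
from transformers import BertJapaneseTokenizer

tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
tokens = tokenizer.tokenize("こんにちは、世界。")
print(tokens)                                   # word pieces, e.g. ['こんにちは', '、', '世界', '。']
print(tokenizer.convert_tokens_to_ids(tokens))  # vocabulary ids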
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
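# A quick sketch of how a config like the one above is consumed: instantiate
# it (optionally overriding defaults) and build a randomly initialised model
# from it, via the public transformers API. The override values are
# illustrative assumptions.
from transformers import BeitConfig, BeitModel

config = BeitConfig(image_size=384, patch_size=16)  # assumed overrides
model = BeitModel(config)
print(model.config.hidden_size)  # 768, the default above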
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __UpperCAmelCase : Dict = get_logger(__name__) __UpperCAmelCase : Optional[Any] = Path(__file__).parent / "model_card_template.md" __UpperCAmelCase : int = uuida().hex __UpperCAmelCase : Optional[Any] = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES __UpperCAmelCase : Optional[Any] = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES __UpperCAmelCase : Union[str, Any] = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/" def A__ ( SCREAMING_SNAKE_CASE__ = None) -> str: __snake_case: int = F'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += F'''; torch/{_torch_version}''' if is_flax_available(): ua += F'''; jax/{_jax_version}''' ua += F'''; flax/{_flax_version}''' if is_onnx_available(): ua += F'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""" , """""").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__): ua += "; " + "; ".join(F'''{k}/{v}''' for k, v in user_agent.items()) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__): ua += "; " + user_agent return ua def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None) -> List[Any]: if token is None: __snake_case: List[Any] = HfFolder.get_token() if organization is None: __snake_case: List[Any] = whoami(SCREAMING_SNAKE_CASE__)["""name"""] return F'''{username}/{model_id}''' else: return F'''{organization}/{model_id}''' def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]: if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""") if hasattr(SCREAMING_SNAKE_CASE__ , """local_rank""") and args.local_rank not in [-1, 0]: return __snake_case: Tuple = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , """hub_token""") else None __snake_case: Optional[Any] = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__) __snake_case: Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""" , license="""apache-2.0""" , library_name="""diffusers""" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if 
hasattr(SCREAMING_SNAKE_CASE__ , """dataset_name""") else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , """gradient_accumulation_steps""") else None ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta1""") else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , """adam_beta2""") else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , """adam_weight_decay""") else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , """adam_epsilon""") else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , """lr_scheduler""") else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , """lr_warmup_steps""") else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , """ema_inv_gamma""") else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , """ema_power""") else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , """ema_max_decay""") else None , mixed_precision=args.mixed_precision , ) __snake_case: str = os.path.join(args.output_dir , """README.md""") model_card.save(SCREAMING_SNAKE_CASE__) def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None) -> Optional[int]: if resolved_file is None or commit_hash is not None: return commit_hash __snake_case: List[str] = str(Path(SCREAMING_SNAKE_CASE__).as_posix()) __snake_case: Optional[int] = re.search(r"""snapshots/([^/]+)/""" , SCREAMING_SNAKE_CASE__) if search is None: return None __snake_case: Tuple = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __UpperCAmelCase : Tuple = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) __UpperCAmelCase : int = os.path.join(hf_cache_home, "diffusers") def A__ ( SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None) -> None: if new_cache_dir is None: __snake_case: List[Any] = DIFFUSERS_CACHE if old_cache_dir is None: __snake_case: Dict = old_diffusers_cache __snake_case: str = Path(SCREAMING_SNAKE_CASE__).expanduser() __snake_case: List[str] = Path(SCREAMING_SNAKE_CASE__).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*"""): if old_blob_path.is_file() and not old_blob_path.is_symlink(): __snake_case: Optional[Any] = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__) new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__) os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) try: os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) except OSError: logger.warning( """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""") # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). 
__UpperCAmelCase : Optional[Any] = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt") if not os.path.isfile(cache_version_file): __UpperCAmelCase : Tuple = 0 else: with open(cache_version_file) as f: try: __UpperCAmelCase : Optional[int] = int(f.read()) except ValueError: __UpperCAmelCase : Dict = 0 if cache_version < 1: __UpperCAmelCase : Tuple = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " "existing cached models. This is a one-time operation, you can interrupt it or run it " "later by calling `diffusers.utils.hub_utils.move_cache()`." ) try: move_cache() except Exception as e: __UpperCAmelCase : List[Any] = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f'There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ' "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f'There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ' "the directory exists and can be written to." ) def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None) -> str: if variant is not None: __snake_case: List[Any] = weights_name.split(""".""") __snake_case: str = splits[:-1] + [variant] + splits[-1:] __snake_case: Optional[int] = """.""".join(SCREAMING_SNAKE_CASE__) return weights_name def A__ ( SCREAMING_SNAKE_CASE__ , *, SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , ) -> Dict: __snake_case: Union[str, Any] = str(SCREAMING_SNAKE_CASE__) if os.path.isfile(SCREAMING_SNAKE_CASE__): return pretrained_model_name_or_path elif os.path.isdir(SCREAMING_SNAKE_CASE__): if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)): # Load from a PyTorch checkpoint __snake_case: List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) return model_file elif subfolder is not None and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)): __snake_case: List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) return model_file else: raise EnvironmentError( F'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''') else: # 1. 
First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(SCREAMING_SNAKE_CASE__).base_version) >= version.parse("""0.20.0""") ): try: __snake_case: str = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) warnings.warn( F'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' , SCREAMING_SNAKE_CASE__ , ) return model_file except: # noqa: E722 warnings.warn( F'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}\' so that the correct variant file can be added.''' , SCREAMING_SNAKE_CASE__ , ) try: # 2. Load model file as usual __snake_case: str = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""") except RevisionNotFoundError: raise EnvironmentError( F'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' """this model name. 
Check the model page at """ F'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''') except EntryNotFoundError: raise EnvironmentError( F'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''') except HTTPError as err: raise EnvironmentError( F'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''') except ValueError: raise EnvironmentError( F'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' F''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' F''' directory containing a file named {weights_name} or''' """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""") except EnvironmentError: raise EnvironmentError( F'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ F'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' F'''containing a file named {weights_name}''')
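# A worked example of the variant-file naming helper defined above (it is
# _add_variant in the diffusers source): the variant tag is spliced in just
# before the file extension. Reimplemented here only to show the behaviour.
def _add_variant_demo(weights_name, variant=None):
    # same logic as above: split on ".", insert the variant before the suffix
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


assert _add_variant_demo("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert _add_variant_demo("model.safetensors") == "model.safetensors"  # no variant: name unchanged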
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __snake_case ( __lowerCamelCase ): '''simple docstring''' def UpperCAmelCase__ ( self : Optional[int] ): __snake_case: Optional[int] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) ) self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) ) class __snake_case : '''simple docstring''' def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ): __snake_case: List[Any] = parent __snake_case: Dict = batch_size __snake_case: int = image_size __snake_case: Tuple = patch_size __snake_case: Tuple = num_channels __snake_case: str = last_hidden_size __snake_case: Dict = num_attention_heads __snake_case: Dict = hidden_act __snake_case: Tuple = conv_kernel_size __snake_case: List[str] = output_stride __snake_case: List[str] = hidden_dropout_prob __snake_case: Optional[Any] = attention_probs_dropout_prob __snake_case: int = classifier_dropout_prob __snake_case: List[Any] = use_labels __snake_case: Union[str, Any] = is_training __snake_case: Union[str, Any] = num_labels __snake_case: str = initializer_range __snake_case: List[Any] = scope def UpperCAmelCase__ ( self : List[Any] ): __snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case: Tuple = None __snake_case: Any = None if self.use_labels: __snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case: Any = self.get_config() return config, pixel_values, labels, pixel_labels def UpperCAmelCase__ ( self : int ): return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ): __snake_case: List[Any] = MobileViTModel(config=A ) model.to(A ) model.eval() __snake_case: int = model(A ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size 
// self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ): __snake_case: str = self.num_labels __snake_case: Optional[int] = MobileViTForImageClassification(A ) model.to(A ) model.eval() __snake_case: Union[str, Any] = model(A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ): __snake_case: List[Any] = self.num_labels __snake_case: Dict = MobileViTForSemanticSegmentation(A ) model.to(A ) model.eval() __snake_case: Union[str, Any] = model(A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __snake_case: Tuple = model(A , labels=A ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def UpperCAmelCase__ ( self : Dict ): __snake_case: Tuple = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs __snake_case: Optional[int] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) lowerCAmelCase__ = ( { """feature-extraction""": MobileViTModel, """image-classification""": MobileViTForImageClassification, """image-segmentation""": MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False def UpperCAmelCase__ ( self : List[str] ): __snake_case: List[Any] = MobileViTModelTester(self ) __snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A ) def UpperCAmelCase__ ( self : str ): self.config_tester.run_common_tests() @unittest.skip(reason="""MobileViT does not use inputs_embeds""" ) def UpperCAmelCase__ ( self : List[Any] ): pass @unittest.skip(reason="""MobileViT does not support input and output embeddings""" ) def UpperCAmelCase__ ( self : Dict ): pass @unittest.skip(reason="""MobileViT does not output attentions""" ) def UpperCAmelCase__ ( self : Optional[Any] ): pass def UpperCAmelCase__ ( self : str ): __snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case: Optional[Any] = model_class(A ) __snake_case: int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case: Optional[int] = [*signature.parameters.keys()] __snake_case: List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def UpperCAmelCase__ ( self : Optional[int] ): pass def UpperCAmelCase__ ( self : Dict ): __snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def UpperCAmelCase__ ( self : Dict ): def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ): __snake_case: List[str] = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): __snake_case: str 
= model(**self._prepare_for_class(A , A ) ) __snake_case: Optional[int] = outputs.hidden_states __snake_case: Any = 5 self.assertEqual(len(A ) , A ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __snake_case: Union[str, Any] = 2 for i in range(len(A ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case: Optional[Any] = True check_hidden_states_output(A , A , A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case: Dict = True check_hidden_states_output(A , A , A ) def UpperCAmelCase__ ( self : int ): __snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def UpperCAmelCase__ ( self : Union[str, Any] ): __snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case: List[Any] = MobileViTModel.from_pretrained(A ) self.assertIsNotNone(A ) def A__ ( ) -> Optional[int]: __snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") return image @require_torch @require_vision class __snake_case ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase__ ( self : Dict ): return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None @slow def UpperCAmelCase__ ( self : List[Any] ): __snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A ) __snake_case: str = self.default_image_processor __snake_case: Optional[Any] = prepare_img() __snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A ) # forward pass with torch.no_grad(): __snake_case: Dict = model(**A ) # verify the logits __snake_case: List[str] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , A ) __snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Tuple ): __snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __snake_case: List[str] = model.to(A ) __snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __snake_case: List[Any] = prepare_img() __snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A ) # forward pass with torch.no_grad(): __snake_case: List[Any] = model(**A ) __snake_case: Optional[int] = outputs.logits # verify the logits __snake_case: Dict = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , A ) __snake_case: Optional[int] = torch.tensor( [ [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]], [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]], [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], 
[-3.2534, -2.6615, -2.6651]], ] , device=A , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Dict ): __snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __snake_case: str = model.to(A ) __snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" ) __snake_case: List[str] = prepare_img() __snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A ) # forward pass with torch.no_grad(): __snake_case: Dict = model(**A ) __snake_case: List[Any] = outputs.logits.detach().cpu() __snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] ) __snake_case: str = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , A ) __snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A ) __snake_case: Tuple = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , A )
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Sigmoid activation, or its derivative when ``deriv`` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by repeated forward passes and return the prediction."""
    # Random initial weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
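# Editor-added usage sketch (hedged: the seed and iteration count are chosen
# for illustration, and the tolerance hedges against the random initial weight):
#
#     >>> random.seed(0)
#     >>> res = forward_propagation(32, 450_000)
#     >>> 31 < res < 33
#     True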
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
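# Editor-added example (the name `sum_of_divisors` comes from the
# reconstruction above): a perfect number equals the sum of its proper
# divisors, so 6 and 28 return themselves.
#
#     >>> sum_of_divisors(6)
#     6
#     >>> sum_of_divisors(28)
#     28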
"""
Search for odd composite numbers that cannot be written as the sum of a prime
and twice a square.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Check primality using the 6k +/- 1 trial-division shortcut."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that are not a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums

    return []


def solution() -> int:
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
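# Editor-added sanity check (hedged: 5777 is the well-known answer to Project
# Euler problem 46, which this reconstruction appears to implement):
#
#     >>> solution()
#     5777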
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
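# Editor-added sketch of what the _LazyModule indirection buys (hedged: the
# module path is an assumption based on the standard transformers layout):
#
#     import transformers.models.wav2vec2_with_lm as m  # cheap: nothing heavy loaded yet
#     processor_cls = m.Wav2Vec2ProcessorWithLM          # first attribute access imports for real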
import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class A__(unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def UpperCamelCase__ ( self ) -> List[Any]: a_ , a_ : Any = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-canny""" , from_pt=_lowercase , dtype=jnp.bfloataa ) a_ , a_ : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=_lowercase , from_pt=_lowercase , dtype=jnp.bfloataa ) a_ : Union[str, Any] = controlnet_params a_ : int = """bird""" a_ : Tuple = jax.device_count() a_ : List[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) a_ : List[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" ) a_ : Optional[Any] = pipe.prepare_image_inputs([canny_image] * num_samples ) a_ : int = jax.random.PRNGKey(0 ) a_ : Union[str, Any] = jax.random.split(_lowercase , jax.device_count() ) a_ : Any = replicate(_lowercase ) a_ : Optional[int] = shard(_lowercase ) a_ : List[Any] = shard(_lowercase ) a_ : int = pipe( prompt_ids=_lowercase , image=_lowercase , params=_lowercase , prng_seed=_lowercase , num_inference_steps=50 , jit=_lowercase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) a_ : Optional[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a_ : str = images[0, 253:256, 253:256, -1] a_ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) a_ : List[Any] = jnp.array( [0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCamelCase__ ( self ) -> str: a_ , a_ : str = FlaxControlNetModel.from_pretrained( """lllyasviel/sd-controlnet-openpose""" , from_pt=_lowercase , dtype=jnp.bfloataa ) a_ , a_ : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , controlnet=_lowercase , from_pt=_lowercase , dtype=jnp.bfloataa ) a_ : Tuple = controlnet_params a_ : str = """Chef in the kitchen""" a_ : Optional[Any] = jax.device_count() a_ : Any = pipe.prepare_text_inputs([prompts] * num_samples ) a_ : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" ) a_ : Any = pipe.prepare_image_inputs([pose_image] * num_samples ) a_ : str = jax.random.PRNGKey(0 ) a_ : int = jax.random.split(_lowercase , jax.device_count() ) a_ : Optional[int] = replicate(_lowercase ) a_ : Tuple = shard(_lowercase ) a_ : List[Any] = shard(_lowercase ) a_ : str = pipe( prompt_ids=_lowercase , image=_lowercase , params=_lowercase , prng_seed=_lowercase , num_inference_steps=50 , jit=_lowercase , ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) a_ : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) a_ : List[str] = images[0, 253:256, 253:256, -1] a_ : str = 
jnp.asarray(jax.device_get(image_slice.flatten() ) ) a_ : Optional[int] = jnp.array( [[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
import argparse import json import os import re import torch from transformers import BloomConfig, BloomModel from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase__ : int = [ 'word_embeddings_layernorm.weight', 'word_embeddings_layernorm.bias', 'input_layernorm.weight', 'input_layernorm.bias', 'post_attention_layernorm.weight', 'post_attention_layernorm.bias', 'self_attention.dense.bias', 'mlp.dense_4h_to_h.bias', 'ln_f.weight', 'ln_f.bias', ] UpperCAmelCase__ : int = [ 'mlp.dense_4h_to_h.weight', 'self_attention.dense.weight', ] def lowerCamelCase__ ( a , a ) -> int: _A: Union[str, Any] = { '''word_embeddings.weight''': '''word_embeddings.weight''', '''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''', '''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''', '''weight''': '''ln_f.weight''', '''bias''': '''ln_f.bias''', } if key in layer_rename_map: return layer_rename_map[key] # Handle transformer blocks _A: Tuple = int(re.match(R'''.*layer_(\d*).*''' , a )[1] ) layer_number -= 3 return f"""h.{layer_number}.""" + key def lowerCamelCase__ ( a ) -> Optional[Any]: if dtype == torch.bool: return 1 / 8 _A: Tuple = re.search(R'''[^\d](\d+)$''' , str(a ) ) if bit_search is None: raise ValueError(f"""`dtype` is not a valid dtype: {dtype}.""" ) _A: Any = int(bit_search.groups()[0] ) return bit_size // 8 def lowerCamelCase__ ( a , a , a , a , a ) -> Tuple: # Construct model if bloom_config_file == "": _A: Union[str, Any] = BloomConfig() else: _A: Optional[Any] = BloomConfig.from_json_file(a ) if shard_model: _A: Dict = os.listdir(a ) _A: Optional[Any] = sorted(filter(lambda a : s.startswith('''layer''' ) and "model_00" in s , a ) ) _A: Tuple = {'''weight_map''': {}, '''metadata''': {}} _A: Optional[Any] = 0 _A: int = None _A: str = BloomConfig() for j, file in enumerate(a ): print('''Processing file: {}'''.format(a ) ) _A: Tuple = None for i in range(a ): # load all TP files _A: str = file.replace('''model_00''' , f"""model_0{i}""" ) _A: str = torch.load(os.path.join(a , a ) , map_location='''cpu''' ) # Rename keys in the transformers names _A: str = list(temp.keys() ) for key in keys: _A: str = temp.pop(a ) if tensors is None: _A: int = temp else: for key in tensors.keys(): if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _A: Union[str, Any] = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _A: int = torch.cat([tensors[key], temp[key]] , dim=a ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _A: str = tensors[key] / pretraining_tp torch.save( a , os.path.join( a , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(a ) ).zfill(5 ) ) , ) , ) for key in tensors.keys(): _A: Dict = tensors[key] total_size += value.numel() * get_dtype_size(value.dtype ) if key not in index_dict["weight_map"]: _A: Union[str, Any] = '''pytorch_model_{}-of-{}.bin'''.format( str(j + 1 ).zfill(5 ) , str(len(a ) ).zfill(5 ) ) _A: Union[str, Any] = BloomConfig() _A: str = 
pytorch_dump_folder_path + '''/''' + CONFIG_NAME _A: Optional[int] = total_size with open(a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) with open(os.path.join(a , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f: _A: Optional[Any] = json.dumps(a , indent=2 , sort_keys=a ) + '''\n''' f.write(a ) else: _A: int = BloomModel(a ) _A: Tuple = os.listdir(a ) _A: str = sorted(filter(lambda a : s.startswith('''layer''' ) and "model_00" in s , a ) ) _A: Optional[Any] = None for i, file in enumerate(a ): _A: str = None for i in range(a ): # load all TP files _A: Tuple = file.replace('''model_00''' , f"""model_0{i}""" ) _A: List[Any] = torch.load(os.path.join(a , a ) , map_location='''cpu''' ) # Rename keys in the transformers names _A: Tuple = list(temp.keys() ) for key in keys: _A: int = temp.pop(a ) if tensors is None: _A: Union[str, Any] = temp else: for key in tensors.keys(): # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425) if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): tensors[key] += temp[key] else: # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel _A: Dict = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0 # We concatenate these weights accross TP ranks _A: Optional[int] = torch.cat([tensors[key], temp[key]] , dim=a ) # Divide by the number of TP the weights we want to average for key in tensors.keys(): if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ): _A: List[Any] = tensors[key] / pretraining_tp _A: str = model.load_state_dict(a , strict=a ) assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected""" if missing_keys is None: _A: Union[str, Any] = set(other_keys.missing_keys ) else: _A: Any = missing_keys.intersection(set(other_keys.missing_keys ) ) assert not missing_keys, f"""The keys {missing_keys} are missing""" # Save pytorch-model os.makedirs(a , exist_ok=a ) _A: Optional[Any] = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME _A: Optional[int] = pytorch_dump_folder_path + '''/''' + CONFIG_NAME print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" ) if config.torch_dtype is not None: _A: List[str] = model.to(config.torch_dtype ) torch.save(model.state_dict() , a ) print(f"""Save configuration file to {pytorch_config_dump_path}""" ) with open(a , '''w''' , encoding='''utf-8''' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": UpperCAmelCase__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bloom_checkpoint_path', default=None, type=str, required=True, help='Path to the Megatron-LM checkpoint path.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--bloom_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--shard_model', action='store_true', help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint', ) parser.add_argument( '--pretraining_tp', default=4, type=int, help='Pretraining TP rank that has been used when training the model in Megatron-LM \n', ) UpperCAmelCase__ : Any = parser.parse_args() convert_bloom_checkpoint_to_pytorch( args.bloom_checkpoint_path, args.bloom_config_file, args.pytorch_dump_folder_path, args.shard_model, args.pretraining_tp, )
from __future__ import annotations import unittest from transformers import AutoTokenizer, MBartConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel @require_tf class UpperCAmelCase : '''simple docstring''' __UpperCamelCase : Any = MBartConfig __UpperCamelCase : Tuple = {} __UpperCamelCase : Dict = '''gelu''' def __init__( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=1_3 , lowerCAmelCase_ : Dict=7 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Union[str, Any]=9_9 , lowerCAmelCase_ : Dict=3_2 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Union[str, Any]=3_7 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : List[str]=2_0 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : List[Any]=0 , ): """simple docstring""" _A: Union[str, Any] = parent _A: List[Any] = batch_size _A: Dict = seq_length _A: Dict = is_training _A: str = use_labels _A: int = vocab_size _A: str = hidden_size _A: Tuple = num_hidden_layers _A: Optional[Any] = num_attention_heads _A: Tuple = intermediate_size _A: int = hidden_dropout_prob _A: Tuple = attention_probs_dropout_prob _A: Tuple = max_position_embeddings _A: Dict = eos_token_id _A: int = pad_token_id _A: Any = bos_token_id def __magic_name__ ( self : Dict ): """simple docstring""" _A: Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _A: Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _A: List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) _A: Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _A: int = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _A: Any = prepare_mbart_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return config, inputs_dict def __magic_name__ ( self : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ): """simple docstring""" _A: Tuple = TFMBartModel(config=lowerCAmelCase_ ).get_decoder() _A: List[str] = inputs_dict['''input_ids'''] _A: Tuple = input_ids[:1, :] _A: List[Any] = inputs_dict['''attention_mask'''][:1, :] _A: str = inputs_dict['''head_mask'''] _A: Optional[Any] = 1 # first forward pass _A: Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) _A , _A: List[str] = outputs.to_tuple() _A: Dict = past_key_values[1] def lowerCamelCase__ ( a , a , a , a=None , a=None 
, a=None , a=None , a=None , ) -> Tuple: if attention_mask is None: _A: Union[str, Any] = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _A: Optional[int] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _A: Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _A: Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _A: Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' __UpperCamelCase : Union[str, Any] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else () __UpperCamelCase : int = (TFMBartForConditionalGeneration,) if is_tf_available() else () __UpperCamelCase : Tuple = ( { '''conversational''': TFMBartForConditionalGeneration, '''feature-extraction''': TFMBartModel, '''summarization''': TFMBartForConditionalGeneration, '''text2text-generation''': TFMBartForConditionalGeneration, '''translation''': TFMBartForConditionalGeneration, } if is_tf_available() else {} ) __UpperCamelCase : List[Any] = True __UpperCamelCase : int = False __UpperCamelCase : Optional[Any] = False def __magic_name__ ( self : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : int ): """simple docstring""" if pipeline_test_casse_name != "FeatureExtractionPipelineTests": # Exception encountered when calling layer '...' 
return True return False def __magic_name__ ( self : Any ): """simple docstring""" _A: Dict = TFMBartModelTester(self ) _A: Tuple = ConfigTester(self , config_class=lowerCAmelCase_ ) def __magic_name__ ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[Any] ): """simple docstring""" _A: str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ ) @require_sentencepiece @require_tokenizers @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' __UpperCamelCase : Optional[int] = [ ''' UN Chief Says There Is No Military Solution in Syria''', ] __UpperCamelCase : List[str] = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', ] __UpperCamelCase : Union[str, Any] = '''facebook/mbart-large-en-ro''' @cached_property def __magic_name__ ( self : Tuple ): """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def __magic_name__ ( self : str ): """simple docstring""" _A: Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def __magic_name__ ( self : Union[str, Any] , **lowerCAmelCase_ : Tuple ): """simple docstring""" _A: Optional[Any] = self.translate_src_text(**lowerCAmelCase_ ) self.assertListEqual(self.expected_text , lowerCAmelCase_ ) def __magic_name__ ( self : Dict , **lowerCAmelCase_ : Tuple ): """simple docstring""" _A: Any = self.tokenizer(self.src_text , **lowerCAmelCase_ , return_tensors='''tf''' ) _A: Any = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 ) _A: Optional[Any] = self.tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) return generated_words @slow def __magic_name__ ( self : List[str] ): """simple docstring""" self._assert_generated_batch_equal_expected()
"""simple docstring""" class _lowerCAmelCase : """simple docstring""" def __init__( self : int ): __lowercase = "" __lowercase = "" __lowercase = [] def _lowercase ( self : int, UpperCAmelCase__ : int, UpperCAmelCase__ : int ): if m == -1: return n + 1 elif n == -1: return m + 1 elif self.dp[m][n] > -1: return self.dp[m][n] else: if self.worda[m] == self.worda[n]: __lowercase = self.__min_dist_top_down_dp(m - 1, n - 1 ) else: __lowercase = self.__min_dist_top_down_dp(UpperCAmelCase__, n - 1 ) __lowercase = self.__min_dist_top_down_dp(m - 1, UpperCAmelCase__ ) __lowercase = self.__min_dist_top_down_dp(m - 1, n - 1 ) __lowercase = 1 + min(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) return self.dp[m][n] def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : str, UpperCAmelCase__ : str ): __lowercase = worda __lowercase = worda __lowercase = [[-1 for _ in range(len(UpperCAmelCase__ ) )] for _ in range(len(UpperCAmelCase__ ) )] return self.__min_dist_top_down_dp(len(UpperCAmelCase__ ) - 1, len(UpperCAmelCase__ ) - 1 ) def _lowercase ( self : int, UpperCAmelCase__ : str, UpperCAmelCase__ : str ): __lowercase = worda __lowercase = worda __lowercase = len(UpperCAmelCase__ ) __lowercase = len(UpperCAmelCase__ ) __lowercase = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )] for i in range(m + 1 ): for j in range(n + 1 ): if i == 0: # first string is empty __lowercase = j elif j == 0: # second string is empty __lowercase = i elif worda[i - 1] == worda[j - 1]: # last characters are equal __lowercase = self.dp[i - 1][j - 1] else: __lowercase = self.dp[i][j - 1] __lowercase = self.dp[i - 1][j] __lowercase = self.dp[i - 1][j - 1] __lowercase = 1 + min(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) return self.dp[m][n] if __name__ == "__main__": _a = EditDistance() print('****************** Testing Edit Distance DP Algorithm ******************') print() _a = input('Enter the first string: ').strip() _a = input('Enter the second string: ').strip() print() print(F"The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}") print(F"The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}") print() print('*************** End of Testing Edit Distance DP Algorithm ***************')
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> int: if not grid or not grid[0]: raise TypeError('''The grid does not contain the appropriate information''' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] lowercase__: Tuple = grid[0] for row_n in range(1 , len(__UpperCAmelCase ) ): lowercase__: Tuple = grid[row_n] lowercase__: Dict = fill_row(__UpperCAmelCase , __UpperCAmelCase ) lowercase__: Union[str, Any] = grid[row_n] return grid[-1][-1] def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list: current_row[0] += row_above[0] for cell_n in range(1 , len(__UpperCAmelCase ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _A = logging.getLogger(__name__) @dataclass class _lowerCamelCase : _lowerCamelCase :str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _lowerCamelCase :Optional[str] = field( default=a_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _lowerCamelCase :Optional[str] = field( default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) _lowerCamelCase :Optional[str] = field( default=a_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _lowerCamelCase :bool = field(default=a_ , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. _lowerCamelCase :Optional[str] = field( default=a_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class _lowerCamelCase : _lowerCamelCase :str = field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) _lowerCamelCase :Optional[str] = field( default=a_ , metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} , ) _lowerCamelCase :int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _lowerCamelCase :bool = field( default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowercase_ ( ) -> List[str]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCAmelCase__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. 
Use""" """ --overwrite_output_dir to overcome.""" ) lowerCAmelCase__ : Tuple = import_module("""tasks""" ) try: lowerCAmelCase__ : Optional[int] = getattr(__UpperCAmelCase , model_args.task_type ) lowerCAmelCase__ : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """ f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , __UpperCAmelCase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task lowerCAmelCase__ : Any = token_classification_task.get_labels(data_args.labels ) lowerCAmelCase__ : Dict[int, str] = dict(enumerate(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = len(__UpperCAmelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCAmelCase__ : Optional[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid={label: i for i, label in enumerate(__UpperCAmelCase )} , cache_dir=model_args.cache_dir , ) lowerCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) lowerCAmelCase__ : Optional[int] = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets lowerCAmelCase__ : Tuple = ( TokenClassificationDataset( token_classification_task=__UpperCAmelCase , data_dir=data_args.data_dir , tokenizer=__UpperCAmelCase , labels=__UpperCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) lowerCAmelCase__ : List[Any] = ( TokenClassificationDataset( token_classification_task=__UpperCAmelCase , data_dir=data_args.data_dir , tokenizer=__UpperCAmelCase , labels=__UpperCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(__UpperCAmelCase , __UpperCAmelCase ) -> Tuple[List[int], List[int]]: lowerCAmelCase__ : Optional[int] = np.argmax(__UpperCAmelCase , axis=2 ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = preds.shape lowerCAmelCase__ : Union[str, Any] = [[] 
for _ in range(__UpperCAmelCase )] lowerCAmelCase__ : int = [[] for _ in range(__UpperCAmelCase )] for i in range(__UpperCAmelCase ): for j in range(__UpperCAmelCase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(__UpperCAmelCase ) -> Dict: lowerCAmelCase__ , lowerCAmelCase__ : str = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(__UpperCAmelCase , __UpperCAmelCase ), "precision": precision_score(__UpperCAmelCase , __UpperCAmelCase ), "recall": recall_score(__UpperCAmelCase , __UpperCAmelCase ), "f1": fa_score(__UpperCAmelCase , __UpperCAmelCase ), } # Data collator lowerCAmelCase__ : List[str] = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer lowerCAmelCase__ : List[Any] = Trainer( model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , data_collator=__UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowerCAmelCase__ : List[Any] = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) lowerCAmelCase__ : List[str] = trainer.evaluate() lowerCAmelCase__ : List[str] = os.path.join(training_args.output_dir , """eval_results.txt""" ) if trainer.is_world_process_zero(): with open(__UpperCAmelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in result.items(): logger.info(""" %s = %s""" , __UpperCAmelCase , __UpperCAmelCase ) writer.write("""%s = %s\n""" % (key, value) ) results.update(__UpperCAmelCase ) # Predict if training_args.do_predict: lowerCAmelCase__ : List[str] = TokenClassificationDataset( token_classification_task=__UpperCAmelCase , data_dir=data_args.data_dir , tokenizer=__UpperCAmelCase , labels=__UpperCAmelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = trainer.predict(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = align_predictions(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = os.path.join(training_args.output_dir , """test_results.txt""" ) if trainer.is_world_process_zero(): with open(__UpperCAmelCase , """w""" ) as writer: for key, value in metrics.items(): logger.info(""" %s = %s""" , __UpperCAmelCase , __UpperCAmelCase ) writer.write("""%s = %s\n""" % (key, value) ) # Save predictions lowerCAmelCase__ : Tuple = os.path.join(training_args.output_dir , """test_predictions.txt""" ) if trainer.is_world_process_zero(): with open(__UpperCAmelCase , """w""" ) as writer: with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f: token_classification_task.write_predictions_to_file(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return results def lowercase_ ( __UpperCAmelCase ) -> str: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
"""simple docstring""" from argparse import ArgumentParser from . import BaseTransformersCLICommand def lowercase_ ( __UpperCAmelCase ) -> List[str]: return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code ) class _lowerCamelCase ( a_ ): @staticmethod def _lowerCAmelCase ( UpperCamelCase : ArgumentParser ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : int = parser.add_parser("""download""" ) download_parser.add_argument( """--cache-dir""" , type=UpperCamelCase , default=UpperCamelCase , help="""Path to location to store the models""" ) download_parser.add_argument( """--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" ) download_parser.add_argument( """--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , ) download_parser.add_argument("""model""" , type=UpperCamelCase , help="""Name of the model to download""" ) download_parser.set_defaults(func=UpperCamelCase ) def __init__( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : bool , UpperCamelCase : bool ) -> Any: """simple docstring""" lowerCAmelCase__ : int = model lowerCAmelCase__ : Union[str, Any] = cache lowerCAmelCase__ : Optional[int] = force lowerCAmelCase__ : Dict = trust_remote_code def _lowerCAmelCase ( self : int ) -> Any: """simple docstring""" from ..models.auto import AutoModel, AutoTokenizer AutoModel.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) AutoTokenizer.from_pretrained( self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __UpperCAmelCase ( ) -> str: lowercase__ : Tuple = HfArgumentParser(__lowerCamelCase ) lowercase__ : Optional[Any] = parser.parse_args_into_dataclasses()[0] lowercase__ : int = TensorFlowBenchmark(args=__lowerCamelCase ) try: lowercase__ : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: lowercase__ : Optional[Any] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' lowercase__ : List[str] = ''' '''.join(str(__lowerCamelCase ).split(''' ''' )[:-1] ) lowercase__ : Tuple = '''''' lowercase__ : List[Any] = eval(str(__lowerCamelCase ).split(''' ''' )[-1] ) lowercase__ : Any = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__lowerCamelCase ) if len(__lowerCamelCase ) > 0: lowercase__ : Optional[int] = full_error_msg + begin_error_msg + str(__lowerCamelCase ) raise ValueError(__lowerCamelCase ) benchmark.run() if __name__ == "__main__": main()
def binary_count_setbits(a: int) -> int:
    """Count the set (1) bits in the binary representation of a non-negative int."""
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
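# Editor-added examples: bin(25) == '0b11001', which has three set bits.
#
#     >>> binary_count_setbits(25)
#     3
#     >>> binary_count_setbits(0)
#     0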
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Recursive 0/1 knapsack: skip or take the item at ``index``, whichever yields
    the greater total value without exceeding ``max_weight``.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
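# Editor-added example (weights/values are illustrative): with capacity 5 the
# best pick is the items of weight 1 and 4, for a total value of 5 + 8 = 13.
#
#     >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
#     13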
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Any=7 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Optional[Any]=30 , __lowerCamelCase : Union[str, Any]=400 , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Tuple=None , __lowerCamelCase : int=True , __lowerCamelCase : Dict=[0.5, 0.5, 0.5] , __lowerCamelCase : int=[0.5, 0.5, 0.5] , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : str=1 / 255 , __lowerCamelCase : str=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCamelCase :List[Any] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1_333} UpperCamelCase :Tuple = parent UpperCamelCase :int = batch_size UpperCamelCase :str = num_channels UpperCamelCase :Dict = min_resolution UpperCamelCase :Any = max_resolution UpperCamelCase :int = do_resize UpperCamelCase :str = size UpperCamelCase :Dict = do_normalize UpperCamelCase :Tuple = image_mean UpperCamelCase :Optional[int] = image_std UpperCamelCase :Tuple = do_rescale UpperCamelCase :Optional[Any] = rescale_factor UpperCamelCase :List[Any] = do_pad def _A ( self : List[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _A ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[int]=False ): if not batched: UpperCamelCase :Optional[Any] = image_inputs[0] if isinstance(__lowerCamelCase , Image.Image ): UpperCamelCase , UpperCamelCase :Union[str, Any] = image.size else: UpperCamelCase , UpperCamelCase :Optional[int] = image.shape[1], image.shape[2] if w < h: UpperCamelCase :int = int(self.size["""shortest_edge"""] * h / w ) UpperCamelCase :Tuple = self.size["""shortest_edge"""] elif w > h: UpperCamelCase :List[Any] = self.size["""shortest_edge"""] UpperCamelCase :str = int(self.size["""shortest_edge"""] * w / h ) else: UpperCamelCase :List[Any] = self.size["""shortest_edge"""] UpperCamelCase :str = self.size["""shortest_edge"""] else: UpperCamelCase :List[Any] = [] for image in image_inputs: UpperCamelCase , UpperCamelCase :int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase :int = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0] UpperCamelCase :Tuple = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ): snake_case__ : Optional[int] = DeformableDetrImageProcessor if is_vision_available() else None def _A ( self : Optional[Any] ): UpperCamelCase :str = DeformableDetrImageProcessingTester(self ) @property def _A ( self : Optional[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _A ( self : Dict ): UpperCamelCase :int = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__lowerCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """image_std""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_rescale""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """do_pad""" ) ) self.assertTrue(hasattr(__lowerCamelCase , """size""" ) ) def _A ( self : str ): UpperCamelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1_333} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) UpperCamelCase :int = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , __lowerCamelCase ) def _A ( self : List[Any] ): pass def _A ( self : Dict ): # Initialize image_processing UpperCamelCase :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , Image.Image ) # Test not batched input UpperCamelCase :Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCamelCase , UpperCamelCase :Optional[int] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase , UpperCamelCase :str = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) UpperCamelCase :int = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self : Tuple ): # Initialize image_processing UpperCamelCase :Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase :Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , np.ndarray ) # Test not batched input UpperCamelCase :Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCamelCase , UpperCamelCase :Any = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase :Dict = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values UpperCamelCase , UpperCamelCase :Optional[Any] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _A ( self : Any ): # Initialize image_processing UpperCamelCase :Optional[Any] = 
self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase ) for image in image_inputs: self.assertIsInstance(__lowerCamelCase , torch.Tensor ) # Test not batched input UpperCamelCase :Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values UpperCamelCase , UpperCamelCase :List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase :Union[str, Any] = image_processing(__lowerCamelCase , return_tensors="""pt""" ).pixel_values UpperCamelCase , UpperCamelCase :List[str] = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _A ( self : Optional[Any] ): # prepare image and target UpperCamelCase :int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: UpperCamelCase :str = json.loads(f.read() ) UpperCamelCase :List[Any] = {"""image_id""": 39_769, """annotations""": target} # encode them UpperCamelCase :Optional[int] = DeformableDetrImageProcessor() UpperCamelCase :Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="""pt""" ) # verify pixel values UpperCamelCase :Union[str, Any] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , __lowerCamelCase ) UpperCamelCase :Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) ) # verify area UpperCamelCase :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCamelCase ) ) # verify boxes UpperCamelCase :List[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCamelCase ) UpperCamelCase :List[str] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCamelCase , atol=1E-3 ) ) # verify image_id UpperCamelCase :Tuple = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCamelCase ) ) # verify is_crowd UpperCamelCase :List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCamelCase ) ) # verify class_labels UpperCamelCase :Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCamelCase ) ) # verify orig_size UpperCamelCase :Dict = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCamelCase ) ) # verify size UpperCamelCase :int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCamelCase ) ) @slow def _A ( self : str ): # prepare image, target and masks_path UpperCamelCase :Any = 
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: UpperCamelCase :Any = json.loads(f.read() ) UpperCamelCase :int = {"""file_name""": """000000039769.png""", """image_id""": 39_769, """segments_info""": target} UpperCamelCase :Any = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them UpperCamelCase :Tuple = DeformableDetrImageProcessor(format="""coco_panoptic""" ) UpperCamelCase :Dict = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="""pt""" ) # verify pixel values UpperCamelCase :Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["""pixel_values"""].shape , __lowerCamelCase ) UpperCamelCase :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) ) # verify area UpperCamelCase :List[str] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __lowerCamelCase ) ) # verify boxes UpperCamelCase :List[str] = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __lowerCamelCase ) UpperCamelCase :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __lowerCamelCase , atol=1E-3 ) ) # verify image_id UpperCamelCase :str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __lowerCamelCase ) ) # verify is_crowd UpperCamelCase :Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __lowerCamelCase ) ) # verify class_labels UpperCamelCase :List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __lowerCamelCase ) ) # verify masks UpperCamelCase :Union[str, Any] = 822_873 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __lowerCamelCase ) # verify orig_size UpperCamelCase :Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __lowerCamelCase ) ) # verify size UpperCamelCase :str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __lowerCamelCase ) )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def print_max_activities(start: list, finish: list) -> None:
    """Greedy activity selection; assumes activities are sorted by finish time."""
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
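# Editor-added note: for the sample arrays above, the greedy pass (earliest
# finish time first) prints the activity indices 0, 1, 3, 4.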
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        return {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
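# ---------------------------------------------------------------------------
# The reset-then-measure pattern from _start_torch_memory_measurement() is
# useful on its own; a minimal sketch (the workload passed in is a
# placeholder, not something defined in this file):
def measure_peak_cuda_memory(fn) -> int:
    """Run `fn` and return the peak CUDA memory it allocated, in bytes."""
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    fn()
    torch.cuda.synchronize()  # make sure queued kernels are accounted for
    return torch.cuda.max_memory_allocated()

# usage sketch (hypothetical model/input):
#   peak = measure_peak_cuda_memory(lambda: model(sample_input))
#   assert peak < 4 * 10**9
# ---------------------------------------------------------------------------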
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple[str, float]:
    """
    Solve for the one missing carrier concentration in a semiconductor using
    the mass action law (n * p = n_i**2). Exactly one argument must be 0,
    marking it as the unknown to solve for.

    >>> carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
    ('intrinsic_conc', 50.0)
    >>> carrier_concentration(electron_conc=0, hole_conc=1600, intrinsic_conc=200)
    ('electron_conc', 25.0)
    >>> carrier_concentration(electron_conc=1000, hole_conc=0, intrinsic_conc=1200)
    ('hole_conc', 1440.0)
    """
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("Exactly one concentration must be 0 (the unknown to solve for)")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return ("electron_conc", intrinsic_conc**2 / hole_conc)
    elif hole_conc == 0:
        return ("hole_conc", intrinsic_conc**2 / electron_conc)
    elif intrinsic_conc == 0:
        return ("intrinsic_conc", (electron_conc * hole_conc) ** 0.5)
    else:
        return (-1, -1)  # unreachable: exactly one value is 0 per the first check


if __name__ == "__main__":
    import doctest

    doctest.testmod()
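# ---------------------------------------------------------------------------
# Usage sketch: the branches above all encode the mass action law
# n * p = n_i**2, so any one concentration follows from the other two.
# Values here are illustrative.
#
#     label, value = carrier_concentration(
#         electron_conc=25, hole_conc=100, intrinsic_conc=0
#     )
#     assert (label, value) == ("intrinsic_conc", 50.0)   # sqrt(25 * 100)
#
#     label, value = carrier_concentration(
#         electron_conc=0, hole_conc=1600, intrinsic_conc=200
#     )
#     assert (label, value) == ("electron_conc", 25.0)    # 200**2 / 1600
# ---------------------------------------------------------------------------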