code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType __magic_name__: Tuple = logging.get_logger(__name__) class snake_case__ ( UpperCAmelCase__ ): lowercase__ : Union[str, Any] = '''vision-encoder-decoder''' lowercase__ : List[Any] = True def __init__( self , **lowerCAmelCase__ ) -> Optional[Any]: super().__init__(**UpperCAmelCase__ ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( F'A configuraton of type {self.model_type} cannot be instantiated because ' F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' ) __magic_name__ : Tuple = kwargs.pop("""encoder""" ) __magic_name__ : Any = encoder_config.pop("""model_type""" ) __magic_name__ : Optional[int] = kwargs.pop("""decoder""" ) __magic_name__ : List[Any] = decoder_config.pop("""model_type""" ) __magic_name__ : Union[str, Any] = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) __magic_name__ : Any = AutoConfig.for_model(UpperCAmelCase__ , **UpperCAmelCase__ ) __magic_name__ : Tuple = True @classmethod def __magic_name__ ( cls , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> PretrainedConfig: logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) __magic_name__ : Dict = True __magic_name__ : Any = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCAmelCase__ ) def __magic_name__ ( self ) -> List[str]: __magic_name__ : Any = copy.deepcopy(self.__dict__ ) __magic_name__ : Dict = self.encoder.to_dict() __magic_name__ : Any = self.decoder.to_dict() __magic_name__ : Tuple = self.__class__.model_type return output class snake_case__ ( UpperCAmelCase__ ): 
lowercase__ : List[str] = version.parse('''1.11''' ) @property def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def __magic_name__ ( self ) -> float: return 1e-4 @property def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class snake_case__ ( UpperCAmelCase__ ): @property def __magic_name__ ( self ) -> Mapping[str, Mapping[int, str]]: __magic_name__ : List[str] = OrderedDict() __magic_name__ : List[str] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __magic_name__ : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __magic_name__ : Union[str, Any] = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = -1 , lowerCAmelCase__ = -1 , lowerCAmelCase__ = False , lowerCAmelCase__ = None , ) -> Mapping[str, Any]: import torch __magic_name__ : Any = OrderedDict() __magic_name__ : List[str] = super().generate_dummy_inputs( UpperCAmelCase__ , batch_size=UpperCAmelCase__ , seq_length=UpperCAmelCase__ , is_pair=UpperCAmelCase__ , framework=UpperCAmelCase__ ) __magic_name__ ,__magic_name__ : Dict = dummy_input["""input_ids"""].shape __magic_name__ : Any = (batch, encoder_sequence, self._config.encoder_hidden_size) __magic_name__ : List[Any] = dummy_input.pop("""input_ids""" ) __magic_name__ : List[Any] = dummy_input.pop("""attention_mask""" ) __magic_name__ : Dict = torch.zeros(UpperCAmelCase__ ) return common_inputs class snake_case__ ( UpperCAmelCase__ ): @property def __magic_name__ ( self ) -> None: pass def __magic_name__ ( self , lowerCAmelCase__ ) -> OnnxConfig: return VisionEncoderDecoderEncoderOnnxConfig(UpperCAmelCase__ ) def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 
"default" ) -> OnnxConfig: __magic_name__ : int = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(UpperCAmelCase__ , UpperCAmelCase__ )
324
import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters _lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Dict: """simple docstring""" if "." in tensor_name: A__ = tensor_name.split('''.''' ) for split in splits[:-1]: A__ = getattr(lowercase_ , lowercase_ ) if new_module is None: raise ValueError(f"""{module} has no attribute {split}.""" ) A__ = new_module A__ = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" ) A__ = tensor_name in module._buffers A__ = getattr(lowercase_ , lowercase_ ) if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None: raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" ) A__ = False A__ = False if is_buffer or not is_bitsandbytes_available(): A__ = False A__ = False else: A__ = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) A__ = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: A__ = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to('''cpu''' ) if value.dtype == torch.inta: A__ = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse( 
'''0.37.2''' ) if not is_abit_serializable: raise ValueError( '''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. ''' '''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' ) else: A__ = torch.tensor(lowercase_ , device='''cpu''' ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. if issubclass(module.source_cls , lowercase_ ) and fpaa_statistics is None: A__ = new_value.T A__ = old_value.__dict__ if is_abit: A__ = bnb.nn.IntaParams(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) elif is_abit: A__ = bnb.nn.Paramsabit(lowercase_ , requires_grad=lowercase_ , **lowercase_ ).to(lowercase_ ) A__ = new_value if fpaa_statistics is not None: setattr(module.weight , '''SCB''' , fpaa_statistics.to(lowercase_ ) ) else: if value is None: A__ = old_value.to(lowercase_ ) elif isinstance(lowercase_ , torch.Tensor ): A__ = value.to(lowercase_ ) else: A__ = torch.tensor(lowercase_ , device=lowercase_ ) if is_buffer: A__ = new_value else: A__ = nn.Parameter(lowercase_ , requires_grad=old_value.requires_grad ) A__ = new_value def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False ) -> Dict: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: A__ = [] current_key_name.append(lowercase_ ) if (isinstance(lowercase_ , nn.Linear ) or isinstance(lowercase_ , lowercase_ )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in '''.'''.join(lowercase_ ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(lowercase_ , lowercase_ ): A__ , A__ = module.weight.shape else: A__ = module.in_features A__ = module.out_features if 
quantization_config.quantization_method() == "llm_int8": A__ = bnb.nn.LinearabitLt( lowercase_ , lowercase_ , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) A__ = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: A__ = bnb.nn.Linearabit( lowercase_ , lowercase_ , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) A__ = True # Store the module class in case we need to transpose the weight later A__ = type(lowercase_ ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(lowercase_ ) if len(list(module.children() ) ) > 0: A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ , has_been_replaced=lowercase_ , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> Tuple: """simple docstring""" A__ = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert A__ , A__ = _replace_with_bnb_linear( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def SCREAMING_SNAKE_CASE ( *lowercase_ , **lowercase_ ) -> Dict: """simple docstring""" warnings.warn( '''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , lowercase_ , ) return replace_with_bnb_linear(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE 
( *lowercase_ , **lowercase_ ) -> Optional[Any]: """simple docstring""" warnings.warn( '''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , lowercase_ , ) return set_module_quantized_tensor_to_device(*lowercase_ , **lowercase_ ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> List[str]: """simple docstring""" A__ = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() A__ = find_tied_parameters(lowercase_ ) # For compatibility with Accelerate < 0.18 if isinstance(lowercase_ , lowercase_ ): A__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: A__ = sum(lowercase_ , [] ) A__ = len(lowercase_ ) > 0 # Check if it is a base model A__ = not hasattr(lowercase_ , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head A__ = list(model.named_children() ) A__ = [list_modules[-1][0]] # add last module together with tied weights A__ = set(lowercase_ ) - set(lowercase_ ) A__ = list(set(lowercase_ ) ) + list(lowercase_ ) # remove ".weight" from the keys A__ = ['''.weight''', '''.bias'''] A__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: A__ = name.replace(lowercase_ , '''''' ) filtered_module_names.append(lowercase_ ) return filtered_module_names
87
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A : Optional[Any] = logging.get_logger(__name__) A : Union[str, Any] = { """google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""", """google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCamelCase (UpperCAmelCase__ ): """simple docstring""" lowerCamelCase__ = '''mobilenet_v1''' def __init__( self : Optional[int] , __magic_name__ : Optional[int]=3 , __magic_name__ : Optional[Any]=224 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Optional[int]=8 , __magic_name__ : Tuple="relu6" , __magic_name__ : List[Any]=True , __magic_name__ : Dict=0.999 , __magic_name__ : str=0.02 , __magic_name__ : Optional[int]=0.001 , **__magic_name__ : Dict , ) -> List[str]: super().__init__(**UpperCAmelCase__ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." 
) SCREAMING_SNAKE_CASE_ = num_channels SCREAMING_SNAKE_CASE_ = image_size SCREAMING_SNAKE_CASE_ = depth_multiplier SCREAMING_SNAKE_CASE_ = min_depth SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = tf_padding SCREAMING_SNAKE_CASE_ = classifier_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps class lowerCamelCase (UpperCAmelCase__ ): """simple docstring""" lowerCamelCase__ = version.parse('''1.11''' ) @property def __A ( self : Any ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __A ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __A ( self : int ) -> float: return 1e-4
140
from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) _lowerCamelCase : str = 299792458 # Symbols _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = symbols("""ct x y z""") def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" if velocity > c: raise ValueError('''Speed must not exceed light speed 299,792,458 [m/s]!''' ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError('''Speed must be greater than or equal to 1!''' ) return velocity / c def SCREAMING_SNAKE_CASE ( lowercase_ ) -> float: """simple docstring""" return 1 / sqrt(1 - beta(lowercase_ ) ** 2 ) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> np.ndarray: """simple docstring""" return np.array( [ [gamma(lowercase_ ), -gamma(lowercase_ ) * beta(lowercase_ ), 0, 0], [-gamma(lowercase_ ) * beta(lowercase_ ), gamma(lowercase_ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ = None ) -> np.ndarray: """simple docstring""" if event is None: A__ = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(lowercase_ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: _lowerCamelCase : Tuple = transform(29979245) print("""Example of four vector: """) print(F'''ct\' = {four_vector[0]}''') print(F'''x\' = {four_vector[1]}''') print(F'''y\' = {four_vector[2]}''') print(F'''z\' = {four_vector[3]}''') # Substitute symbols with numerical values _lowerCamelCase : int = {ct: c, x: 1, y: 1, z: 1} _lowerCamelCase : Any = [four_vector[i].subs(sub_dict) for i in range(4)] print(F'''\n{numerical_vector}''')
87
0
"""simple docstring""" def A_ ( snake_case__ ) -> list[list[float]]: _UpperCamelCase :Tuple = [] for data in source_data: for i, el in enumerate(lowercase_ ): if len(lowercase_ ) < i + 1: data_lists.append([] ) data_lists[i].append(float(lowercase_ ) ) return data_lists def A_ ( snake_case__ , snake_case__ ) -> list[list[float]]: _UpperCamelCase :Tuple = [] for dlist, weight in zip(lowercase_ , lowercase_ ): _UpperCamelCase :Dict = min(lowercase_ ) _UpperCamelCase :Union[str, Any] = max(lowercase_ ) _UpperCamelCase :Optional[Any] = [] # for weight 0 score is 1 - actual score if weight == 0: for item in dlist: try: score.append(1 - ((item - mind) / (maxd - mind)) ) except ZeroDivisionError: score.append(1 ) elif weight == 1: for item in dlist: try: score.append((item - mind) / (maxd - mind) ) except ZeroDivisionError: score.append(0 ) # weight not 0 or 1 else: _UpperCamelCase :List[str] = f"Invalid weight of {weight:f} provided" raise ValueError(lowercase_ ) score_lists.append(lowercase_ ) return score_lists def A_ ( snake_case__ ) -> list[float]: _UpperCamelCase :List[str] = [0 for i in range(len(score_lists[0] ) )] for slist in score_lists: for j, ele in enumerate(lowercase_ ): _UpperCamelCase :Any = final_scores[j] + ele return final_scores def A_ ( snake_case__ , snake_case__ ) -> list[list[float]]: _UpperCamelCase :Any = get_data(lowercase_ ) _UpperCamelCase :List[str] = calculate_each_score(lowercase_ , lowercase_ ) _UpperCamelCase :Any = generate_final_scores(lowercase_ ) # append scores to source data for i, ele in enumerate(lowercase_ ): source_data[i].append(lowercase_ ) return source_data
355
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> list: """simple docstring""" if len(lowercase_ ) <= 1: return [tuple(lowercase_ )] A__ = [] def generate(lowercase_ , lowercase_ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 , lowercase_ ) for i in range(k - 1 ): if k % 2 == 0: # k is even A__ , A__ = arr[k - 1], arr[i] else: # k is odd A__ , A__ = arr[k - 1], arr[0] generate(k - 1 , lowercase_ ) generate(len(lowercase_ ) , lowercase_ ) return res if __name__ == "__main__": _lowerCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip() _lowerCamelCase : str = [int(item) for item in user_input.split(""",""")] print(heaps(arr))
87
0
import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging UpperCamelCase__ = """\ """ UpperCamelCase__ = """ Perplexity (PPL) is one of the most common metrics for evaluating language models. It is defined as the exponentiated average negative log-likelihood of a sequence. For more information, see https://huggingface.co/docs/transformers/perplexity """ UpperCamelCase__ = """ Args: model_id (str): model used for calculating Perplexity NOTE: Perplexity can only be calculated for causal language models. This includes models such as gpt2, causal variations of bert, causal versions of t5, and more (the full list can be found in the AutoModelForCausalLM documentation here: https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM ) input_texts (list of str): input text, each separate text snippet is one list entry. batch_size (int): the batch size to run texts through the model. Defaults to 16. add_start_token (bool): whether to add the start token to the texts, so the perplexity can include the probability of the first word. Defaults to True. device (str): device to run on, defaults to 'cuda' when available Returns: perplexity: dictionary containing the perplexity scores for the texts in the input list, as well as the mean perplexity. If one of the input texts is longer than the max input length of the model, then it is truncated to the max length for the perplexity computation. Examples: Example 1: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"] >>> results = perplexity.compute(model_id='gpt2', ... add_start_token=False, ... 
input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 78.22 >>> print(round(results[\"perplexities\"][0], 2)) 11.11 Example 2: >>> perplexity = datasets.load_metric(\"perplexity\") >>> input_texts = datasets.load_dataset(\"wikitext\", ... \"wikitext-2-raw-v1\", ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS [...] >>> input_texts = [s for s in input_texts if s!=''] >>> results = perplexity.compute(model_id='gpt2', ... input_texts=input_texts) # doctest:+ELLIPSIS >>> print(list(results.keys())) ['perplexities', 'mean_perplexity'] >>> print(round(results[\"mean_perplexity\"], 2)) 60.35 >>> print(round(results[\"perplexities\"][0], 2)) 81.12 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__ ( datasets.Metric ): '''simple docstring''' def lowerCAmelCase_ ( self : int ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowerCAmelCase_ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : int = 16 , UpperCamelCase : bool = True , UpperCamelCase : Union[str, Any]=None ): """simple docstring""" if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _lowercase : str = '''cuda''' else: _lowercase : Any = '''cuda''' if torch.cuda.is_available() else '''cpu''' _lowercase : List[str] = AutoModelForCausalLM.from_pretrained(UpperCAmelCase__ ) _lowercase : str = model.to(UpperCAmelCase__ ) _lowercase : str = AutoTokenizer.from_pretrained(UpperCAmelCase__ ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _lowercase : Optional[int] = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(UpperCAmelCase__ ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _lowercase : Tuple = model.config.max_length - 1 else: _lowercase : Optional[int] = model.config.max_length _lowercase : Tuple = tokenizer( UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=UpperCAmelCase__ , return_tensors='''pt''' , return_attention_mask=UpperCAmelCase__ , ).to(UpperCAmelCase__ ) _lowercase : Dict = encodings['''input_ids'''] _lowercase : Dict = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." 
else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _lowercase : Any = [] _lowercase : Any = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(UpperCAmelCase__ ) , UpperCAmelCase__ ) ): _lowercase : List[Any] = min(start_index + batch_size , len(UpperCAmelCase__ ) ) _lowercase : Tuple = encoded_texts[start_index:end_index] _lowercase : int = attn_masks[start_index:end_index] if add_start_token: _lowercase : str = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(UpperCAmelCase__ ) _lowercase : Optional[Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _lowercase : str = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(UpperCAmelCase__ ), attn_mask] , dim=1 ) _lowercase : Any = encoded_batch with torch.no_grad(): _lowercase : Dict = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ ).logits _lowercase : Optional[Any] = out_logits[..., :-1, :].contiguous() _lowercase : Optional[int] = labels[..., 1:].contiguous() _lowercase : Dict = attn_mask[..., 1:].contiguous() _lowercase : List[str] = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , UpperCAmelCase__ ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(UpperCAmelCase__ )}
322
import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: """simple docstring""" return 1.0 / (1.0 + np.exp(-_outputs )) def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Union[str, Any]: """simple docstring""" A__ = np.max(_outputs , axis=-1 , keepdims=lowercase_ ) A__ = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase_ ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''sigmoid''' UpperCAmelCase__ = '''softmax''' UpperCAmelCase__ = '''none''' @add_end_docstrings( UpperCAmelCase__ , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. 
''' , ) class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False UpperCAmelCase__ = ClassificationFunction.NONE def __init__( self : Any , **UpperCAmelCase__ : Optional[Any]) ->str: '''simple docstring''' super().__init__(**UpperCAmelCase__) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : int="" , **UpperCAmelCase__ : Any) ->int: '''simple docstring''' A__ = tokenizer_kwargs A__ = {} if hasattr(self.model.config , '''return_all_scores''') and return_all_scores is None: A__ = self.model.config.return_all_scores if isinstance(UpperCAmelCase__ , UpperCAmelCase__) or top_k is None: A__ = top_k A__ = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , UpperCAmelCase__ , ) if return_all_scores: A__ = None else: A__ = 1 if isinstance(UpperCAmelCase__ , UpperCAmelCase__): A__ = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A__ = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self : str , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[int]) ->Union[str, Any]: '''simple docstring''' A__ = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A__ = '''top_k''' not in kwargs if isinstance(args[0] , UpperCAmelCase__) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Any , **UpperCAmelCase__ : str) ->Dict[str, GenericTensor]: '''simple docstring''' A__ = self.framework if isinstance(UpperCAmelCase__ , UpperCAmelCase__): return self.tokenizer(**UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__) and len(UpperCAmelCase__) == 1 and isinstance(inputs[0] , UpperCAmelCase__) and len(inputs[0]) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) elif isinstance(UpperCAmelCase__ , UpperCAmelCase__): # This is likely an invalid usage of the pipeline attempting to pass text pairs. 
raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''') return self.tokenizer(UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Tuple) ->Tuple: '''simple docstring''' return self.model(**UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=True) ->Dict: '''simple docstring''' if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A__ = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A__ = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''') and function_to_apply is None: A__ = self.model.config.function_to_apply else: A__ = ClassificationFunction.NONE A__ = model_outputs['''logits'''][0] A__ = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A__ = sigmoid(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.SOFTMAX: A__ = softmax(UpperCAmelCase__) elif function_to_apply == ClassificationFunction.NONE: A__ = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""") if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A__ = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(UpperCAmelCase__) ] if not _legacy: dict_scores.sort(key=lambda UpperCAmelCase__: x["score"] , reverse=UpperCAmelCase__) if top_k is not None: A__ = dict_scores[:top_k] return dict_scores
87
0
"""simple docstring""" import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup __A = logging.get_logger(__name__) class snake_case ( UpperCAmelCase__ ): def __init__( self : Union[str, Any] , **UpperCamelCase__ : Optional[int])-> Optional[int]: '''simple docstring''' requires_backends(self , ["bs4"]) super().__init__(**UpperCAmelCase__) def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Dict)-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: str = [] __lowerCAmelCase: List[Any] = [] __lowerCAmelCase: int = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __lowerCAmelCase: str = parent.find_all(child.name , recursive=UpperCAmelCase__) xpath_tags.append(child.name) xpath_subscripts.append( 0 if 1 == len(UpperCAmelCase__) else next(i for i, s in enumerate(UpperCAmelCase__ , 1) if s is child)) __lowerCAmelCase: Optional[int] = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def lowercase_ ( self : Dict , UpperCamelCase__ : Dict)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Optional[Any] = BeautifulSoup(UpperCAmelCase__ , "html.parser") __lowerCAmelCase: Optional[Any] = [] __lowerCAmelCase: Tuple = [] __lowerCAmelCase: Union[str, Any] = [] for element in html_code.descendants: if type(UpperCAmelCase__) == bsa.element.NavigableString: if type(element.parent) != bsa.element.Tag: continue __lowerCAmelCase: Optional[Any] = html.unescape(UpperCAmelCase__).strip() if not text_in_this_tag: continue all_doc_strings.append(UpperCAmelCase__) __lowerCAmelCase , __lowerCAmelCase: Dict = self.xpath_soup(UpperCAmelCase__) stringaxtag_seq.append(UpperCAmelCase__) stringaxsubs_seq.append(UpperCAmelCase__) if len(UpperCAmelCase__) != len(UpperCAmelCase__): raise ValueError("Number of doc strings and xtags 
does not correspond") if len(UpperCAmelCase__) != len(UpperCAmelCase__): raise ValueError("Number of doc strings and xsubs does not correspond") return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def lowercase_ ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Optional[int] = "" for tagname, subs in zip(UpperCAmelCase__ , UpperCAmelCase__): xpath += f"/{tagname}" if subs != 0: xpath += f"[{subs}]" return xpath def __call__( self : Optional[Any] , UpperCamelCase__ : Tuple)-> BatchFeature: '''simple docstring''' __lowerCAmelCase: List[str] = False # Check that strings has a valid type if isinstance(UpperCAmelCase__ , UpperCAmelCase__): __lowerCAmelCase: Optional[Any] = True elif isinstance(UpperCAmelCase__ , (list, tuple)): if len(UpperCAmelCase__) == 0 or isinstance(html_strings[0] , UpperCAmelCase__): __lowerCAmelCase: Tuple = True if not valid_strings: raise ValueError( "HTML strings must of type `str`, `List[str]` (batch of examples), " f"but is of type {type(UpperCAmelCase__)}.") __lowerCAmelCase: Optional[int] = bool(isinstance(UpperCAmelCase__ , (list, tuple)) and (isinstance(html_strings[0] , UpperCAmelCase__))) if not is_batched: __lowerCAmelCase: Optional[int] = [html_strings] # Get nodes + xpaths __lowerCAmelCase: Tuple = [] __lowerCAmelCase: Optional[Any] = [] for html_string in html_strings: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: int = self.get_three_from_single(UpperCAmelCase__) nodes.append(UpperCAmelCase__) __lowerCAmelCase: Union[str, Any] = [] for node, tag_list, sub_list in zip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__): __lowerCAmelCase: Any = self.construct_xpath(UpperCAmelCase__ , UpperCAmelCase__) xpath_strings.append(UpperCAmelCase__) xpaths.append(UpperCAmelCase__) # return as Dict __lowerCAmelCase: Optional[int] = {"nodes": nodes, "xpaths": xpaths} __lowerCAmelCase: str = BatchFeature(data=UpperCAmelCase__ , 
tensor_type=UpperCAmelCase__) return encoded_inputs
346
"""Lazy import structure for the XLNet model family.

Submodules are only imported on first attribute access via ``_LazyModule``;
optional dependency groups (sentencepiece, tokenizers, torch, tf) are skipped
when the corresponding backend is not installed.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# The configuration is always importable; every other group is added below
# only when its backend is available.  (The original mangled code rebound a
# single variable instead of filling this dict, so all entries but the last
# were lost and `_import_structure` itself was undefined at the bottom.)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below is installed instead.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
0
"""simple docstring""" from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class _UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE : Tuple = "pixel_values" __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : str = TimmBackboneConfig def __init__( self , lowercase_ , **lowercase_ ) -> Optional[Any]: requires_backends(self , 'timm' ) super().__init__(UpperCAmelCase__ ) UpperCAmelCase = config if config.backbone is None: raise ValueError('backbone is not set in the config. Please set it to a timm model name.' ) if config.backbone not in timm.list_models(): raise ValueError(F"backbone {config.backbone} is not supported by timm." ) if hasattr(UpperCAmelCase__ , 'out_features' ) and config.out_features is not None: raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.' ) UpperCAmelCase = getattr(UpperCAmelCase__ , 'use_pretrained_backbone' , UpperCAmelCase__ ) if pretrained is None: raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.' ) # We just take the final layer by default. This matches the default for the transformers models. UpperCAmelCase = config.out_indices if getattr(UpperCAmelCase__ , 'out_indices' , UpperCAmelCase__ ) is not None else (-1,) UpperCAmelCase = timm.create_model( config.backbone , pretrained=UpperCAmelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase__ , **UpperCAmelCase__ , ) # These are used to control the output of the model when called. 
If output_hidden_states is True, then # return_layers is modified to include all layers. UpperCAmelCase = self._backbone.return_layers UpperCAmelCase = {layer['module']: str(UpperCAmelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(UpperCAmelCase__ ) @classmethod def a_ ( cls , lowercase_ , *lowercase_ , **lowercase_ ) -> Union[str, Any]: requires_backends(cls , ['vision', 'timm'] ) from ...models.timm_backbone import TimmBackboneConfig UpperCAmelCase = kwargs.pop('config' , TimmBackboneConfig() ) UpperCAmelCase = kwargs.pop('use_timm_backbone' , UpperCAmelCase__ ) if not use_timm: raise ValueError('use_timm_backbone must be True for timm backbones' ) UpperCAmelCase = kwargs.pop('num_channels' , config.num_channels ) UpperCAmelCase = kwargs.pop('features_only' , config.features_only ) UpperCAmelCase = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone ) UpperCAmelCase = kwargs.pop('out_indices' , config.out_indices ) UpperCAmelCase = TimmBackboneConfig( backbone=UpperCAmelCase__ , num_channels=UpperCAmelCase__ , features_only=UpperCAmelCase__ , use_pretrained_backbone=UpperCAmelCase__ , out_indices=UpperCAmelCase__ , ) return super()._from_config(UpperCAmelCase__ , **UpperCAmelCase__ ) def a_ ( self , lowercase_ ) -> int: pass def a_ ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ ) -> Union[BackboneOutput, Tuple[Tensor, ...]]: UpperCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('Cannot output attentions for timm backbones at the moment' ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone UpperCAmelCase = self._all_layers 
UpperCAmelCase = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase = self._return_layers UpperCAmelCase = tuple(hidden_states[i] for i in self.out_indices ) else: UpperCAmelCase = self._backbone(UpperCAmelCase__ , **UpperCAmelCase__ ) UpperCAmelCase = None UpperCAmelCase = tuple(UpperCAmelCase__ ) UpperCAmelCase = tuple(UpperCAmelCase__ ) if hidden_states is not None else None if not return_dict: UpperCAmelCase = (feature_maps,) if output_hidden_states: UpperCAmelCase = output + (hidden_states,) return output return BackboneOutput(feature_maps=UpperCAmelCase__ , hidden_states=UpperCAmelCase__ , attentions=UpperCAmelCase__ )
373
"""MobileNetV1 model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    """Configuration for a MobileNetV1 model.

    NOTE(review): the mangled original gave both classes in this module the
    same placeholder name; the canonical names are restored here.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        # Scales the number of channels per layer; must be strictly positive.
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        # Whether to use TensorFlow-style "SAME" padding.
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    """ONNX export configuration for MobileNetV1."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Tolerance used when comparing ONNX outputs to the PyTorch reference.
        return 1e-4
87
0
"""Value-guided diffusion sampling pipeline for RL trajectory planning."""
import numpy as np
import torch
import tqdm

from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class __lowercase(DiffusionPipeline):
    """Plan trajectories with a diffusion model, guided by a value function.

    NOTE(review): the mangled original named every helper method ``snake_case``
    (each shadowing the previous) and erased the indexed-assignment targets;
    the helper names used here are the ones ``__call__`` actually invokes.
    """

    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        # Per-key normalisation statistics; dataset entries without
        # mean()/std() (non-numeric) are silently skipped.
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        """Standardise *x_in* using the dataset statistics for *key*."""
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        """Invert :meth:`normalize`."""
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        """Recursively convert *x_in* to tensors on the UNet's device."""
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_xa(self, x_in, cond, act_dim):
        # Pin the state portion of conditioned timesteps so trajectories keep
        # starting from the observed state after every update.
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        """Denoise *x*, nudging each step toward higher value estimates.

        Returns the final trajectories and the last value predictions
        (``None`` when the scheduler has no timesteps).
        """
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # Broadcast the scalar timestep across the batch.
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # Permute to match the layout expected by the pretrained models.
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                # Presumably guidance is disabled at the lowest noise levels —
                # TODO confirm against the original upstream pipeline.
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_xa(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # Apply conditions to the trajectory (set the initial state).
            x = self.reset_xa(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        """Plan from *obs* and return the first denormalised action."""
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # Generate initial noise and apply the conditions so trajectories
        # start at the current state.
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_xa(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # Run the guided diffusion process.
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # Sort output trajectories by predicted value, best first.
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        if y is not None:
            # Trajectories are sorted, so index 0 is the highest-value plan.
            selected_index = 0
        else:
            # Without value guidance, fall back to a random trajectory.
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
542
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCamelCase : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _lowerCamelCase : str = 5 _lowerCamelCase : int = 10 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = SpeechaTextTokenizer UpperCAmelCase__ = False UpperCAmelCase__ = True def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' super().setUp() A__ = sp.SentencePieceProcessor() spm_model.Load(UpperCAmelCase__) A__ = ['''<s>''', '''<pad>''', '''</s>''', '''<unk>'''] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(UpperCAmelCase__))] A__ = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__)))) A__ = Path(self.tmpdirname) save_json(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''vocab_file''']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCAmelCase__ , save_dir / VOCAB_FILES_NAMES['''spm_file''']) A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[int]: '''simple docstring''' A__ = '''<pad>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' A__ = 
list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '''<s>''') self.assertEqual(vocab_keys[1] , '''<pad>''') self.assertEqual(vocab_keys[-1] , '''j''') self.assertEqual(len(UpperCAmelCase__) , 1_001) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[str]: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_001) def SCREAMING_SNAKE_CASE ( self : int) ->List[str]: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) A__ = tokenizer.tokenize('''This is a test''') self.assertListEqual(UpperCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [289, 50, 14, 174, 386] , ) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''') self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.'''] , ) A__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual(UpperCAmelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8]) A__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.'''] , ) @slow def SCREAMING_SNAKE_CASE ( self : int) ->List[Any]: '''simple docstring''' A__ = 
{'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name='''facebook/s2t-small-mustc-en-de-st''' , 
revision='''a14f04cf0776c02f62a8cb800cf7909e15ea23ad''' , ) @require_sentencepiece class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = '''valhalla/s2t_mustc_multilinguial_medium''' UpperCAmelCase__ = '''C\'est trop cool''' UpperCAmelCase__ = '''Esto es genial''' @classmethod def SCREAMING_SNAKE_CASE ( cls : Dict) ->Dict: '''simple docstring''' A__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def SCREAMING_SNAKE_CASE ( self : str) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.lang_code_to_id['''pt'''] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['''ru'''] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['''it'''] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['''de'''] , 11) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' self.assertEqual(self.tokenizer.vocab_size , 10_000) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids) A__ = [ES_CODE, 4, 1_601, 47, 7_647, 2] A__ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__) A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->str: '''simple docstring''' A__ = '''fr''' A__ = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , UpperCAmelCase__) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = '''fr''' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) A__ = '''es''' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
87
0
'''simple docstring''' SCREAMING_SNAKE_CASE_ = { """meter""": """m""", """kilometer""": """km""", """megametre""": """Mm""", """gigametre""": """Gm""", """terametre""": """Tm""", """petametre""": """Pm""", """exametre""": """Em""", """zettametre""": """Zm""", """yottametre""": """Ym""", } # Exponent of the factor(meter) SCREAMING_SNAKE_CASE_ = { """m""": 0, """km""": 3, """Mm""": 6, """Gm""": 9, """Tm""": 12, """Pm""": 15, """Em""": 18, """Zm""": 21, """Ym""": 24, } def lowerCamelCase__ ( a__ , a__ , a__) -> float: """simple docstring""" _snake_case : Optional[Any] = from_type.lower().strip('s') _snake_case : List[str] = to_type.lower().strip('s') _snake_case : Dict = UNIT_SYMBOL.get(lowercase_ , lowercase_) _snake_case : Optional[int] = UNIT_SYMBOL.get(lowercase_ , lowercase_) if from_sanitized not in METRIC_CONVERSION: _snake_case : str = ( F"""Invalid 'from_type' value: {from_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(lowercase_)}""" ) raise ValueError(lowercase_) if to_sanitized not in METRIC_CONVERSION: _snake_case : Optional[Any] = ( F"""Invalid 'to_type' value: {to_type!r}.\n""" F"""Conversion abbreviations are: {', '.join(lowercase_)}""" ) raise ValueError(lowercase_) _snake_case : Tuple = METRIC_CONVERSION[from_sanitized] _snake_case : List[Any] = METRIC_CONVERSION[to_sanitized] _snake_case : Any = 1 if from_exponent > to_exponent: _snake_case : Optional[Any] = from_exponent - to_exponent else: _snake_case : List[str] = -(to_exponent - from_exponent) return value * pow(1_0 , lowercase_) if __name__ == "__main__": from doctest import testmod testmod()
517
"""Fetch and render Hacker News top stories.

API reference: https://github.com/HackerNews/API
"""
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    """Return the JSON payload describing a single story."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    # A timeout keeps the request from hanging forever on a dead connection.
    return requests.get(url, timeout=10).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Return details of the first *max_stories* top stories."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url, timeout=10).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of links."""
    stories = hackernews_top_stories(max_stories)
    # Each story dict is assumed to carry "title" and "url" keys — TODO
    # confirm; "Ask HN" items have no "url" and would raise KeyError here.
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
87
0
"""Logic OR gate.

Truth table: the output is 1 when at least one input is 1.
"""


def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one of the two binary inputs is 1, else 0.

    The mangled original declared the same parameter name twice (a
    SyntaxError) and OR-ed a variable with itself; both inputs are now
    distinct.
    """
    # count(1) over the pair is nonzero iff at least one input equals 1.
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Exhaustively check the two-input truth table."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
432
# Image feature for Apache Arrow-backed datasets: stores an image as a
# {"bytes", "path"} struct and converts to/from PIL images and numpy arrays.
#
# NOTE(review): this file is machine-obfuscated.  Module constants are all
# bound to `_lowerCamelCase` while the code reads `_IMAGE_COMPRESSION_FORMATS`,
# `_NATIVE_BYTEORDER` and `_VALID_IMAGE_ARRAY_DTPYES`; locals are all rebound
# to `A__` while later lines read the intended names (`value`, `bytes_array`,
# `path_array`, ...).  As written it will raise NameError at runtime; the
# documentation below describes the evident intent — confirm against upstream
# before restoring identifiers.
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict

if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType

# Lazily-populated cache of image formats PIL can both open and save.
_lowerCamelCase: Optional[List[str]] = None
# Native byte-order marker for numpy dtype strings on this machine.
_lowerCamelCase: int = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_lowerCamelCase: Union[str, Any] = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]


@dataclass
class UpperCamelCase_:
    """Image feature type: pa.struct({'bytes': binary, 'path': string})."""

    # Whether examples are decoded to PIL images (True) or left as raw dicts.
    UpperCAmelCase__ = True
    UpperCAmelCase__ = None
    # Automatically constructed
    UpperCAmelCase__ = "PIL.Image.Image"
    UpperCAmelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
    UpperCAmelCase__ = field(default='''Image''', init=UpperCAmelCase__, repr=UpperCAmelCase__)

    def __call__(self: List[str]) -> List[str]:
        """Return the underlying pyarrow storage type."""
        return self.pa_type

    def SCREAMING_SNAKE_CASE(self: Optional[Any], UpperCAmelCase__: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode a path / raw bytes / numpy array / PIL image / dict into a
        {'bytes', 'path'} example dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support encoding images, please install \'Pillow\'.''')
        if isinstance(UpperCAmelCase__, UpperCAmelCase__):
            A__ = np.array(UpperCAmelCase__)
        if isinstance(UpperCAmelCase__, UpperCAmelCase__):
            return {"path": value, "bytes": None}
        elif isinstance(UpperCAmelCase__, UpperCAmelCase__):
            return {"path": None, "bytes": value}
        elif isinstance(UpperCAmelCase__, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(UpperCAmelCase__)
        elif isinstance(UpperCAmelCase__, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(UpperCAmelCase__)
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")

    def SCREAMING_SNAKE_CASE(self: Optional[int], UpperCAmelCase__: dict, UpperCAmelCase__: str = None) -> "PIL.Image.Image":
        """Decode a {'bytes', 'path'} example into a PIL image, reading from a
        local path, a hub URL (via xopen), or inline bytes."""
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''')
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('''To support decoding images, please install \'Pillow\'.''')
        if token_per_repo_id is None:
            A__ = {}
        A__, A__ = value['''path'''], value['''bytes''']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(UpperCAmelCase__):
                    A__ = PIL.Image.open(UpperCAmelCase__)
                else:
                    # Remote file: resolve the repo's auth token, then stream it.
                    A__ = path.split('''::''')[-1]
                    try:
                        A__ = string_to_dict(UpperCAmelCase__, config.HUB_DATASETS_URL)['''repo_id''']
                        A__ = token_per_repo_id.get(UpperCAmelCase__)
                    except ValueError:
                        A__ = None
                    with xopen(UpperCAmelCase__, '''rb''', use_auth_token=UpperCAmelCase__) as f:
                        A__ = BytesIO(f.read())
                    A__ = PIL.Image.open(bytes_)
        else:
            A__ = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def SCREAMING_SNAKE_CASE(self: Dict) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Feature type used when decoding is disabled (raw bytes/path values)."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('''binary'''),
                "path": Value('''string'''),
            }
        )

    def SCREAMING_SNAKE_CASE(self: Dict, UpperCAmelCase__: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast string / binary / struct / list arrow storage into the canonical
        {'bytes', 'path'} struct array."""
        if pa.types.is_string(storage.type):
            A__ = pa.array([None] * len(UpperCAmelCase__), type=pa.binary())
            A__ = pa.StructArray.from_arrays([bytes_array, storage], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            A__ = pa.array([None] * len(UpperCAmelCase__), type=pa.string())
            A__ = pa.StructArray.from_arrays([storage, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                A__ = storage.field('''bytes''')
            else:
                A__ = pa.array([None] * len(UpperCAmelCase__), type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                A__ = storage.field('''path''')
            else:
                A__ = pa.array([None] * len(UpperCAmelCase__), type=pa.string())
            A__ = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # List storage holds raw arrays: encode each to image bytes first.
            A__ = pa.array(
                [encode_np_array(np.array(UpperCAmelCase__))['''bytes'''] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            A__ = pa.array([None] * len(UpperCAmelCase__), type=pa.string())
            A__ = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null())
        return array_cast(UpperCAmelCase__, self.pa_type)

    def SCREAMING_SNAKE_CASE(self: List[Any], UpperCAmelCase__: pa.StructArray) -> pa.StructArray:
        """Embed referenced files into the array: read every 'path' whose
        'bytes' is missing, keeping only the file's basename as the path."""
        @no_op_if_value_is_null
        def path_to_bytes(UpperCAmelCase__: Dict):
            # Read a referenced file's content so the bytes travel with the table.
            with xopen(UpperCAmelCase__, '''rb''') as f:
                A__ = f.read()
            return bytes_

        A__ = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        A__ = pa.array(
            [os.path.basename(UpperCAmelCase__) if path is not None else None for path in storage.field('''path''').to_pylist()], type=pa.string(), )
        A__ = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null())
        return array_cast(UpperCAmelCase__, self.pa_type)


def SCREAMING_SNAKE_CASE() -> List[str]:
    """Return (and lazily cache) the formats PIL can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''')
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        A__ = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def SCREAMING_SNAKE_CASE(lowercase_) -> bytes:
    """Serialize a PIL image to bytes, keeping its format when known,
    otherwise PNG for common modes and TIFF for the rest."""
    A__ = BytesIO()
    if image.format in list_image_compression_formats():
        A__ = image.format
    else:
        A__ = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
    image.save(lowercase_, format=lowercase_)
    return buffer.getvalue()


def SCREAMING_SNAKE_CASE(lowercase_) -> dict:
    """Encode a PIL image as a {'bytes', 'path'} dict, preferring its on-disk
    filename when the image has one."""
    if hasattr(lowercase_, '''filename''') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(lowercase_)}


def SCREAMING_SNAKE_CASE(lowercase_) -> dict:
    """Encode a numpy array as image bytes, downcasting the dtype to one
    Pillow can round-trip when necessary."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''')
    A__ = array.dtype
    A__ = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
    A__ = dtype.kind
    A__ = dtype.itemsize
    A__ = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        A__ = np.dtype('''|u1''')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""")
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        A__ = dtype
    else:
        # Downcast the type within the kind (np.can_cast(from_type, to_type,
        # casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            A__ = dtype_byteorder + dtype_kind + str(lowercase_)
            A__ = np.dtype(lowercase_)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""")
    A__ = PIL.Image.fromarray(array.astype(lowercase_))
    return {"path": None, "bytes": image_to_bytes(lowercase_)}


def SCREAMING_SNAKE_CASE(lowercase_) -> List[dict]:
    """Encode a column of objects (paths, arrays, or PIL images — inferred
    from the first non-null value) into {'bytes', 'path'} dicts."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('''To support encoding images, please install \'Pillow\'.''')
    if objs:
        A__, A__ = first_non_null_value(lowercase_)
        if isinstance(lowercase_, lowercase_):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(lowercase_, np.ndarray):
            A__ = no_op_if_value_is_null(lowercase_)
            return [obj_to_image_dict_func(lowercase_) for obj in objs]
        elif isinstance(lowercase_, PIL.Image.Image):
            A__ = no_op_if_value_is_null(lowercase_)
            return [obj_to_image_dict_func(lowercase_) for obj in objs]
        else:
            return objs
    else:
        return objs
87
0
"""Simple symmetric XOR cipher (for learning purposes only — not secure)."""

from __future__ import annotations


class XORCipher:
    """XOR cipher over single characters.

    NOTE(review): the original had all six methods under one mangled name
    (so only the last definition survived) and ``__init__`` bound the key
    to a throw-away local instead of ``self.__key``.  The names restored
    here are the ones the commented usage examples below already call.
    """

    def __init__(self, key: int = 0) -> None:
        """Store an optional default key, used whenever a method receives
        a falsy key (0/None)."""
        # Private (name-mangled) default key.
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR every character of ``content`` with ``key``; returns the
        encrypted characters as a list."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of :meth:`encrypt` (XOR is its own inverse)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-encrypt ``content`` and return the result as a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt ``file`` line by line into 'encrypt.out'; True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt ``file`` line by line into 'decrypt.out'; True on success."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True


# Tests
# crypt = XORCipher()
# key = 67

# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))

# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))

# if (crypt.encrypt_file("test.txt",key)):
#     print("encrypt successful")
# else:
#     print("encrypt unsuccessful")

# if (crypt.decrypt_file("encrypt.out",key)):
#     print("decrypt successful")
# else:
#     print("decrypt unsuccessful")
400
# Unit tests for the TensorFlow MobileBERT models (masked LM, NSP,
# pretraining, QA, sequence/token classification, multiple choice) plus a
# slow integration test against the released checkpoint.
#
# NOTE(review): this file is machine-obfuscated.  Every test method is named
# `SCREAMING_SNAKE_CASE`, parameters are all `UpperCAmelCase__` (including
# duplicates, which is a SyntaxError), and locals are all rebound to `A__`
# while later lines read the intended names (`inputs_dict`, `model`,
# `result`, ...).  Documented as-is; confirm against the upstream
# `tests/models/mobilebert/test_modeling_tf_mobilebert.py` before restoring.
from __future__ import annotations

import unittest

from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_MODEL_FOR_PRETRAINING_MAPPING,
        TFMobileBertForMaskedLM,
        TFMobileBertForMultipleChoice,
        TFMobileBertForNextSentencePrediction,
        TFMobileBertForPreTraining,
        TFMobileBertForQuestionAnswering,
        TFMobileBertForSequenceClassification,
        TFMobileBertForTokenClassification,
        TFMobileBertModel,
    )


@require_tf
class UpperCamelCase_(UpperCAmelCase__, UpperCAmelCase__, unittest.TestCase):
    """Common-test harness for all TF MobileBERT model heads."""

    # All model classes exercised by the shared mixin tests.
    UpperCAmelCase__ = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # pipeline task name -> model class mapping for pipeline tests.
    UpperCAmelCase__ = (
        {
            '''feature-extraction''': TFMobileBertModel,
            '''fill-mask''': TFMobileBertForMaskedLM,
            '''question-answering''': TFMobileBertForQuestionAnswering,
            '''text-classification''': TFMobileBertForSequenceClassification,
            '''token-classification''': TFMobileBertForTokenClassification,
            '''zero-shot''': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase__ = False
    UpperCAmelCase__ = False

    def SCREAMING_SNAKE_CASE(self: Optional[Any], UpperCAmelCase__: Dict, UpperCAmelCase__: Tuple, UpperCAmelCase__: str = False) -> Optional[Any]:
        """Add dummy labels for pretraining-mapped models when requested."""
        A__ = super()._prepare_for_class(UpperCAmelCase__, UpperCAmelCase__, return_labels=UpperCAmelCase__)
        if return_labels:
            if model_class in get_values(UpperCAmelCase__):
                # NOTE(review): `tf.intaa` is mangled (presumably tf.int32) — confirm.
                A__ = tf.zeros(self.model_tester.batch_size, dtype=tf.intaa)
        return inputs_dict

    class UpperCamelCase_(UpperCAmelCase__):
        """Builds tiny MobileBERT configs/inputs and checks each head's output shapes."""

        def __init__(self: List[Any], UpperCAmelCase__: str, UpperCAmelCase__: int = 13, UpperCAmelCase__: str = 7, UpperCAmelCase__: Optional[Any] = True, UpperCAmelCase__: Optional[int] = True, UpperCAmelCase__: Union[str, Any] = True, UpperCAmelCase__: Optional[int] = True, UpperCAmelCase__: str = 99, UpperCAmelCase__: List[str] = 32, UpperCAmelCase__: Optional[int] = 32, UpperCAmelCase__: Any = 2, UpperCAmelCase__: List[str] = 4, UpperCAmelCase__: Optional[Any] = 37, UpperCAmelCase__: Optional[int] = "gelu", UpperCAmelCase__: Any = 0.1, UpperCAmelCase__: Optional[Any] = 0.1, UpperCAmelCase__: List[Any] = 512, UpperCAmelCase__: Tuple = 16, UpperCAmelCase__: Any = 2, UpperCAmelCase__: Dict = 0.02, UpperCAmelCase__: int = 3, UpperCAmelCase__: List[str] = 4, UpperCAmelCase__: Tuple = None, ) -> Any:
            """Record the (tiny) model hyper-parameters used by every check."""
            A__ = parent
            A__ = batch_size
            A__ = seq_length
            A__ = is_training
            A__ = use_input_mask
            A__ = use_token_type_ids
            A__ = use_labels
            A__ = vocab_size
            A__ = hidden_size
            A__ = num_hidden_layers
            A__ = num_attention_heads
            A__ = intermediate_size
            A__ = hidden_act
            A__ = hidden_dropout_prob
            A__ = attention_probs_dropout_prob
            A__ = max_position_embeddings
            A__ = type_vocab_size
            A__ = type_sequence_label_size
            A__ = initializer_range
            A__ = num_labels
            A__ = num_choices
            A__ = scope
            A__ = embedding_size

        def SCREAMING_SNAKE_CASE(self: Optional[Any]) -> Tuple:
            """Build a MobileBertConfig plus random ids/masks/labels."""
            A__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            A__ = None
            if self.use_input_mask:
                A__ = random_attention_mask([self.batch_size, self.seq_length])
            A__ = None
            if self.use_token_type_ids:
                A__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            A__ = None
            A__ = None
            A__ = None
            if self.use_labels:
                A__ = ids_tensor([self.batch_size], self.type_sequence_label_size)
                A__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                A__ = ids_tensor([self.batch_size], self.num_choices)
            A__ = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size, )
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

        def SCREAMING_SNAKE_CASE(self: Union[str, Any], UpperCAmelCase__: List[str], UpperCAmelCase__: str, UpperCAmelCase__: str, UpperCAmelCase__: List[Any], UpperCAmelCase__: Any, UpperCAmelCase__: Union[str, Any], UpperCAmelCase__: Optional[Any]) -> Any:
            """Base model: check hidden-state and pooler output shapes, with
            dict, list and positional call styles."""
            A__ = TFMobileBertModel(config=UpperCAmelCase__)
            A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
            A__ = model(UpperCAmelCase__)
            A__ = [input_ids, input_mask]
            A__ = model(UpperCAmelCase__)
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

        def SCREAMING_SNAKE_CASE(self: List[str], UpperCAmelCase__: int, UpperCAmelCase__: str, UpperCAmelCase__: Any, UpperCAmelCase__: int, UpperCAmelCase__: Optional[int], UpperCAmelCase__: Any, UpperCAmelCase__: Tuple) -> Optional[Any]:
            """Masked-LM head: logits shaped (batch, seq, vocab)."""
            A__ = TFMobileBertForMaskedLM(config=UpperCAmelCase__)
            A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

        def SCREAMING_SNAKE_CASE(self: Dict, UpperCAmelCase__: List[Any], UpperCAmelCase__: Optional[Any], UpperCAmelCase__: List[Any], UpperCAmelCase__: int, UpperCAmelCase__: int, UpperCAmelCase__: List[Any], UpperCAmelCase__: Optional[Any]) -> int:
            """Next-sentence head: logits shaped (batch, 2)."""
            A__ = TFMobileBertForNextSentencePrediction(config=UpperCAmelCase__)
            A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

        def SCREAMING_SNAKE_CASE(self: Dict, UpperCAmelCase__: Dict, UpperCAmelCase__: List[Any], UpperCAmelCase__: Union[str, Any], UpperCAmelCase__: Dict, UpperCAmelCase__: Dict, UpperCAmelCase__: List[Any], UpperCAmelCase__: int) -> List[Any]:
            """Pretraining head: MLM logits plus (batch, 2) NSP logits."""
            A__ = TFMobileBertForPreTraining(config=UpperCAmelCase__)
            A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

        def SCREAMING_SNAKE_CASE(self: Tuple, UpperCAmelCase__: List[Any], UpperCAmelCase__: Union[str, Any], UpperCAmelCase__: Any, UpperCAmelCase__: Any, UpperCAmelCase__: int, UpperCAmelCase__: List[str], UpperCAmelCase__: Tuple) -> Dict:
            """Sequence-classification head: logits shaped (batch, num_labels)."""
            A__ = self.num_labels
            A__ = TFMobileBertForSequenceClassification(config=UpperCAmelCase__)
            A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        def SCREAMING_SNAKE_CASE(self: Any, UpperCAmelCase__: List[Any], UpperCAmelCase__: int, UpperCAmelCase__: str, UpperCAmelCase__: Any, UpperCAmelCase__: Optional[Any], UpperCAmelCase__: str, UpperCAmelCase__: int) -> Dict:
            """Multiple-choice head: inputs tiled per choice; logits (batch, num_choices)."""
            A__ = self.num_choices
            A__ = TFMobileBertForMultipleChoice(config=UpperCAmelCase__)
            A__ = tf.tile(tf.expand_dims(UpperCAmelCase__, 1), (1, self.num_choices, 1))
            A__ = tf.tile(tf.expand_dims(UpperCAmelCase__, 1), (1, self.num_choices, 1))
            A__ = tf.tile(tf.expand_dims(UpperCAmelCase__, 1), (1, self.num_choices, 1))
            A__ = {
                '''input_ids''': multiple_choice_inputs_ids,
                '''attention_mask''': multiple_choice_input_mask,
                '''token_type_ids''': multiple_choice_token_type_ids,
            }
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

        def SCREAMING_SNAKE_CASE(self: Dict, UpperCAmelCase__: Union[str, Any], UpperCAmelCase__: int, UpperCAmelCase__: Optional[int], UpperCAmelCase__: List[Any], UpperCAmelCase__: Any, UpperCAmelCase__: Dict, UpperCAmelCase__: Optional[int]) -> int:
            """Token-classification head: logits shaped (batch, seq, num_labels)."""
            A__ = self.num_labels
            A__ = TFMobileBertForTokenClassification(config=UpperCAmelCase__)
            A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

        def SCREAMING_SNAKE_CASE(self: List[Any], UpperCAmelCase__: Optional[Any], UpperCAmelCase__: Dict, UpperCAmelCase__: List[Any], UpperCAmelCase__: Tuple, UpperCAmelCase__: Union[str, Any], UpperCAmelCase__: Any, UpperCAmelCase__: Union[str, Any]) -> Union[str, Any]:
            """QA head: start/end logits each shaped (batch, seq)."""
            A__ = TFMobileBertForQuestionAnswering(config=UpperCAmelCase__)
            A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
            A__ = model(UpperCAmelCase__)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

        def SCREAMING_SNAKE_CASE(self: Any) -> str:
            """Split prepare_config_and_inputs() into (config, inputs_dict)."""
            A__ = self.prepare_config_and_inputs()
            (
                (A__),
                (A__),
                (A__),
                (A__),
                (A__),
                (A__),
                (A__),
            ) = config_and_inputs
            A__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
            return config, inputs_dict

    def SCREAMING_SNAKE_CASE(self: Any) -> Union[str, Any]:
        """setUp: build the nested tester and the config tester."""
        A__ = TFMobileBertModelTest.TFMobileBertModelTester(self)
        A__ = ConfigTester(self, config_class=UpperCAmelCase__, hidden_size=37)

    def SCREAMING_SNAKE_CASE(self: Tuple) -> Optional[int]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def SCREAMING_SNAKE_CASE(self: Optional[Any]) -> Dict:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE(self: List[str]) -> Tuple:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE(self: Union[str, Any]) -> Dict:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE(self: Tuple) -> Dict:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE(self: List[Any]) -> Union[str, Any]:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE(self: int) -> Optional[int]:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE(self: Tuple) -> Optional[int]:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*UpperCAmelCase__)

    def SCREAMING_SNAKE_CASE(self: Tuple) -> List[str]:
        A__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*UpperCAmelCase__)

    @slow
    def SCREAMING_SNAKE_CASE(self: str) -> List[Any]:
        """Smoke-test loading the released checkpoint from the hub."""
        for model_name in ["google/mobilebert-uncased"]:
            A__ = TFMobileBertModel.from_pretrained(UpperCAmelCase__)
            self.assertIsNotNone(UpperCAmelCase__)


@require_tf
class UpperCamelCase_(unittest.TestCase):
    """Integration test: compares checkpoint logits against recorded values."""

    @slow
    def SCREAMING_SNAKE_CASE(self: Optional[Any]) -> Any:
        A__ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''')
        A__ = tf.constant([[0, 1, 2, 3, 4, 5]])
        A__ = model(UpperCAmelCase__)[0]
        A__ = [1, 6, 30_522]
        self.assertEqual(output.shape, UpperCAmelCase__)
        # Reference logits recorded from the released checkpoint.
        A__ = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], UpperCAmelCase__, atol=1e-4)
87
0
"""Recursive Towers of Hanoi solver that prints one line per disk move."""


def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Move a tower of ``height`` disks from ``from_pole`` to ``to_pole``,
    using ``with_pole`` as the spare, printing every move.

    NOTE(review): the original gave all three functions one mangled name and
    passed a mangled placeholder for every argument (NameError); restored the
    names the recursion itself calls.
    """
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)  # clear disks above
        move_disk(from_pole, to_pole)  # move the largest remaining disk
        move_tower(height - 1, with_pole, to_pole, from_pole)  # restack on top


def move_disk(fp: str, tp: str) -> None:
    """Print a single disk move."""
    print("moving disk from", fp, "to", tp)


def main() -> None:
    """Read the tower height from stdin and print the full solution."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
138
# Unit tests for an image processor (instantiated as ViTImageProcessor, with
# the tester named EfficientFormerImageProcessorTester): checks processor
# properties and output pixel-value shapes for PIL, numpy and torch inputs.
#
# NOTE(review): machine-obfuscated file — all test methods are named
# `SCREAMING_SNAKE_CASE` and locals rebound to `A__` while later lines read
# the intended names (`image_processor`, `encoded_images`, ...).  Documented
# as-is; will not run until identifiers are restored.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class UpperCamelCase_(unittest.TestCase):
    """Holds the toy processor settings and builds its config dict."""

    def __init__(self: Tuple, UpperCAmelCase__: List[str], UpperCAmelCase__: int = 13, UpperCAmelCase__: Union[str, Any] = 3, UpperCAmelCase__: str = 224, UpperCAmelCase__: str = 30, UpperCAmelCase__: Tuple = 400, UpperCAmelCase__: int = True, UpperCAmelCase__: Dict = None, UpperCAmelCase__: List[Any] = True, UpperCAmelCase__: Union[str, Any] = [0.5, 0.5, 0.5], UpperCAmelCase__: Tuple = [0.5, 0.5, 0.5], ) -> str:
        """Record the image-processor parameters used by every test."""
        A__ = size if size is not None else {'''height''': 18, '''width''': 18}
        A__ = parent
        A__ = batch_size
        A__ = num_channels
        A__ = image_size
        A__ = min_resolution
        A__ = max_resolution
        A__ = do_resize
        A__ = size
        A__ = do_normalize
        A__ = image_mean
        A__ = image_std

    def SCREAMING_SNAKE_CASE(self: Any) -> Optional[int]:
        """Return the kwargs used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class UpperCamelCase_(UpperCAmelCase__, unittest.TestCase):
    """Image-processor tests for PIL, numpy and torch input types."""

    UpperCAmelCase__ = ViTImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE(self: List[str]) -> str:
        """setUp: build the tester holding the toy settings."""
        A__ = EfficientFormerImageProcessorTester(self)

    @property
    def SCREAMING_SNAKE_CASE(self: Dict) -> int:
        """Kwargs for instantiating the processor under test."""
        return self.image_proc_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE(self: Optional[Any]) -> Dict:
        """The processor exposes the expected configuration attributes."""
        A__ = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCAmelCase__, '''image_mean'''))
        self.assertTrue(hasattr(UpperCAmelCase__, '''image_std'''))
        self.assertTrue(hasattr(UpperCAmelCase__, '''do_normalize'''))
        self.assertTrue(hasattr(UpperCAmelCase__, '''do_resize'''))
        self.assertTrue(hasattr(UpperCAmelCase__, '''size'''))

    def SCREAMING_SNAKE_CASE(self: List[str]) -> Dict:
        """Intentionally skipped placeholder test."""
        pass

    def SCREAMING_SNAKE_CASE(self: str) -> Optional[Any]:
        """PIL inputs: single image and batch both yield (N, C, H, W) tensors."""
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        A__ = prepare_image_inputs(self.image_proc_tester, equal_resolution=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__, Image.Image)
        # Test not batched input
        A__ = image_processor(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        A__ = image_processor(UpperCAmelCase__, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )

    def SCREAMING_SNAKE_CASE(self: Tuple) -> List[Any]:
        """numpy inputs: same shape checks as the PIL case."""
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        A__ = prepare_image_inputs(self.image_proc_tester, equal_resolution=UpperCAmelCase__, numpify=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__, np.ndarray)
        # Test not batched input
        A__ = image_processor(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        A__ = image_processor(UpperCAmelCase__, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )

    def SCREAMING_SNAKE_CASE(self: Tuple) -> Optional[Any]:
        """torch inputs: same shape checks as the PIL case."""
        A__ = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        A__ = prepare_image_inputs(self.image_proc_tester, equal_resolution=UpperCAmelCase__, torchify=UpperCAmelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase__, torch.Tensor)
        # Test not batched input
        A__ = image_processor(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        A__ = image_processor(UpperCAmelCase__, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
87
0
"""Convert an original mLUKE checkpoint (plus metadata and entity vocab) into
the Hugging Face ``LukeForMaskedLM`` / ``MLukeTokenizer`` format.

NOTE(review): identifiers in this block were mechanically scrambled (five
duplicate ``_A`` parameters -- a SyntaxError -- and reads of undefined names
such as ``lowercase_``).  They are restored here to the coherent names that
the surviving call sites at the bottom of the file already use
(``convert_luke_checkpoint``).
"""
import argparse
import json
import os
from collections import OrderedDict

import torch

from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Load the original checkpoint, patch vocab/weights, verify outputs, save.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` holding the model config.
        entity_vocab_path: path to the original entity-vocab JSON-lines file.
        pytorch_dump_folder_path: output directory for model + tokenizer.
        model_size: ``"base"`` or ``"large"`` (only ``"base"`` is verified).
    """
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file and add an entry for [MASK2]
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Re-tag the saved tokenizer as an MLukeTokenizer.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from "@" and "#".
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # by copying the plain query weights/biases into the three entity-aware
    # variants.  NOTE(review): the target key names were destroyed by the
    # obfuscation; restored from the LUKE entity-aware attention layout
    # (w2e_/e2w_/e2e_) -- verify against modeling_luke.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    # The decoder weights are re-tied from the embeddings after loading.
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        # Backbone weights live under the "luke." prefix in the HF model.
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = value
        else:
            state_dict_for_hugging_face[key] = value

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    """Read the original JSON-lines entity vocab into a ``{name: id}`` mapping.

    Special tokens keep their bare name; every other entity is keyed as
    ``"{language}:{entity_name}"``.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
324
"""Lambert's formula for geodesic distance on the WGS-84 ellipsoid.

NOTE(review): the original block's locals were scrambled (assignments went to
``A__`` while later expressions read ``b_lata`` etc., which were undefined);
the coherent names are restored here from the surviving reads and the
constant names the function body references.
"""
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

# WGS-84 ellipsoid: equatorial (semi-major) and polar (semi-minor) radii, meters.
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Approximate the distance in meters between two latitude/longitude points
    (degrees) using Lambert's ellipsoidal formula.

    NOTE(review): identical points give sigma == 0 and a ZeroDivisionError in
    the Y term (sin(sigma/2) ** 2 == 0) -- same as the original; callers
    should not pass coincident points.
    """
    # Flattening of the ellipsoid.
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between the two points:
    # sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # X = (sigma - sin(sigma)) * sin^2(P)cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Y = (sigma + sin(sigma)) * cos^2(P)sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
0
def solution(n: int = 1000) -> int:
    """Project Euler problem 1: sum of all multiples of 3 or 5 below ``n``.

    The scrambled original read an undefined name as the range bound and its
    ``__main__`` block called ``solution`` -- both restored here.  Starting
    the range at 3 is safe: 0, 1 and 2 contribute nothing to the sum.

    >>> solution(10)
    23
    >>> solution()
    233168
    """
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f"{solution() = }")
140
"""Multi-Heuristic A* path-finding demo on a 20x20 grid.

NOTE(review): identifiers in this block were mechanically scrambled (locals
collapsed to ``A__`` while later statements read the intended names, and the
heuristic dispatch dict referenced the same undefined name twice).  Coherent
names are restored from the surviving call sites (``PriorityQueue()``,
``multi_a_star(start, goal, n_heuristic)``, the ``heapq`` usage).
"""
import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    """Min-heap keyed by priority, with O(1) membership via a shadow set."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        """Smallest priority currently stored, or +inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert *item*, or re-key it when it is already present."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until the stale one surfaces, then push
            # everything back with the new priority for *item*.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Delete *item* from the heap if present (linear rebuild)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Item with the smallest priority, without removing it."""
        return self.elements[0][1]

    def get(self):
        """Pop and return ``(priority, item)`` for the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # Integer-dividing by the global time counter t makes this inconsistent.
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict):
    """Priority of *start* in queue *i*: g-cost plus weighted heuristic."""
    return g_function[start] + W1 * heuristics[i](start, goal)


def do_something(back_pointer, goal, start):
    """Render the grid with the found path, print it, and exit the process."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    # NOTE: terminates the whole process once a path has been printed.
    sys.exit()


def valid(p: TPos):
    """True when *p* lies inside the n x n grid."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    """Relax the four neighbours of *s* and push them into the open lists."""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(
                                    neighbours, 0, goal, g_function
                                ):
                                    open_list[j].put(neighbours, key(neighbours, var, goal, g_function))


def make_common_ground():
    """Hard-coded obstacle layout (an alternative to ``blocks_blk``)."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

# One horizontal wall at y == 1 (equivalent to the original literal list
# (0, 1) .. (19, 1)).
blocks_blk = [(x, 1) for x in range(20)]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    """Run Multi-Heuristic A* from *start* to *goal*.

    On success this prints the path via ``do_something`` (which exits the
    process); otherwise it prints the grid and "No path found to goal".
    """
    global t
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list = []
    close_list_inad: list = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # FIX(review): the scrambled text unpacked top_show() into
                    # two names, which would split the (x, y) position tuple;
                    # the queue's top item is used as-is here.
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, i, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s, 0, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer
                    )
                    close_list_anchor.append(get_s)

    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
87
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch UpperCamelCase__ :Dict = logging.get_logger(__name__) @dataclass class A: """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=6.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="fp4" , SCREAMING_SNAKE_CASE__=False , **SCREAMING_SNAKE_CASE__ , ) -> List[str]: """simple docstring""" _UpperCamelCase :Dict = load_in_abit _UpperCamelCase :Optional[Any] = load_in_abit _UpperCamelCase :Optional[Any] = llm_inta_threshold _UpperCamelCase :Optional[int] = llm_inta_skip_modules _UpperCamelCase :Dict = llm_inta_enable_fpaa_cpu_offload _UpperCamelCase :str = llm_inta_has_fpaa_weight _UpperCamelCase :List[Any] = bnb_abit_quant_type _UpperCamelCase :List[str] = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: _UpperCamelCase :Optional[Any] = torch.floataa elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _UpperCamelCase :List[str] = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , torch.dtype ): 
_UpperCamelCase :Optional[int] = bnb_abit_compute_dtype else: raise ValueError('''bnb_4bit_compute_dtype must be a string or a torch.dtype''' ) self.post_init() def _UpperCamelCase( self ) -> Optional[Any]: """simple docstring""" if not isinstance(self.llm_inta_threshold , UpperCAmelCase__ ): raise ValueError('''llm_int8_threshold must be a float''' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , UpperCAmelCase__ ): raise ValueError('''llm_int8_skip_modules must be a list of strings''' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , UpperCAmelCase__ ): raise ValueError('''llm_int8_enable_fp32_cpu_offload must be a boolean''' ) if not isinstance(self.llm_inta_has_fpaa_weight , UpperCAmelCase__ ): raise ValueError('''llm_int8_has_fp16_weight must be a boolean''' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('''bnb_4bit_compute_dtype must be torch.dtype''' ) if not isinstance(self.bnb_abit_quant_type , UpperCAmelCase__ ): raise ValueError('''bnb_4bit_quant_type must be a string''' ) if not isinstance(self.bnb_abit_use_double_quant , UpperCAmelCase__ ): raise ValueError('''bnb_4bit_use_double_quant must be a boolean''' ) if self.load_in_abit and not version.parse(importlib.metadata.version('''bitsandbytes''' ) ) >= version.parse( '''0.39.0''' ): raise ValueError( '''4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version''' ) def _UpperCamelCase( self ) -> Tuple: """simple docstring""" return self.load_in_abit or self.load_in_abit def _UpperCamelCase( self ) -> List[Any]: """simple docstring""" if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def _UpperCamelCase( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 
**SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" _UpperCamelCase :List[Any] = cls(**UpperCAmelCase__ ) _UpperCamelCase :Optional[int] = [] for key, value in kwargs.items(): if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ): setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) to_remove.append(UpperCAmelCase__ ) for key in to_remove: kwargs.pop(UpperCAmelCase__ , UpperCAmelCase__ ) if return_unused_kwargs: return config, kwargs else: return config def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ ) -> List[str]: """simple docstring""" with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''' ) as writer: _UpperCamelCase :Dict = self.to_dict() _UpperCamelCase :Optional[int] = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + '''\n''' writer.write(UpperCAmelCase__ ) def _UpperCamelCase( self ) -> Dict[str, Any]: """simple docstring""" _UpperCamelCase :Tuple = copy.deepcopy(self.__dict__ ) _UpperCamelCase :List[Any] = str(output['''bnb_4bit_compute_dtype'''] ).split('''.''' )[1] return output def __repr__( self ) -> List[Any]: """simple docstring""" return f"{self.__class__.__name__} {self.to_json_string()}" def _UpperCamelCase( self , SCREAMING_SNAKE_CASE__ = True ) -> str: """simple docstring""" if use_diff is True: _UpperCamelCase :Tuple = self.to_diff_dict() else: _UpperCamelCase :Any = self.to_dict() return json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + "\n" def _UpperCamelCase( self ) -> Dict[str, Any]: """simple docstring""" _UpperCamelCase :Union[str, Any] = self.to_dict() # get the default config dict _UpperCamelCase :List[Any] = BitsAndBytesConfig().to_dict() _UpperCamelCase :Tuple = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: _UpperCamelCase :List[Any] = value return serializable_config_dict
355
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# NOTE(review): parameter and constant names in this block were scrambled
# (the body read undefined names such as `subparsers`, `args` and `cache_dir`
# while the signature declared `lowercase_`); coherent names restored.

import argparse
import os

from accelerate.utils import ComputeEnvironment

from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file  # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment  # noqa: F401
from .sagemaker import get_sagemaker_input


description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """Ask which compute environment is used and run the matching questionnaire."""
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Create the `accelerate config` parser (standalone or as a subcommand)."""
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    """Run the questionnaire and save the resulting config as YAML or JSON."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # Ensure the default cache location exists before writing there.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
87
0
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # One-time download of the punkt sentence tokenizer; the FileLock keeps
    # concurrent workers from downloading simultaneously.
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Return *x* re-segmented so each sentence sits on its own line.

    NOTE(review): identifiers in this block were scrambled (the availability
    flag and the function shared one mangled name, and the body read undefined
    names); restored here.  The original also discarded the ``re.sub`` result
    -- fixed so the pegasus ``<n>`` markers are actually stripped, as the
    adjacent comment intends.
    """
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
322
"""Convert an original SpeechT5 HiFi-GAN vocoder checkpoint to the HF format.

NOTE(review): function names and assignment targets in this block were
scrambled (the file calls ``load_weights`` and ``convert_hifigan_checkpoint``
but defined them under mangled names, with undefined ``lowercase_`` params);
restored here.  The ``SpeechT5HifiGan`` class names are restored from the
transformers public API -- verify against the installed version.
"""
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    """Copy generator weights from the original checkpoint dict into *hf_model*.

    Weight-norm is applied first so the ``weight_g``/``weight_v``
    parametrization exists, then removed once the raw tensors are in place.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """Convert an original HiFi-GAN checkpoint and save it in HF format.

    Args:
        checkpoint_path: original checkpoint file.
        stats_path: ``stats.npy`` with mel normalization statistics
            (row 0 = mean, row 1 = scale).
        pytorch_dump_folder_path: output directory.
        config_path: optional HF ``config.json`` to use instead of defaults.
        repo_id: optional hub repo to push the converted model to.
    """
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
    parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
87
0
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __A = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model") @require_sentencepiece @require_tokenizers class snake_case ( UpperCAmelCase__, unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[str] = GPTSwaTokenizer SCREAMING_SNAKE_CASE_ : Any = False SCREAMING_SNAKE_CASE_ : Any = True SCREAMING_SNAKE_CASE_ : Union[str, Any] = False def lowercase_ ( self : int)-> Tuple: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase: str = GPTSwaTokenizer(UpperCAmelCase__ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>") tokenizer.save_pretrained(self.tmpdirname) def lowercase_ ( self : Optional[int] , UpperCamelCase__ : List[Any])-> Optional[Any]: '''simple docstring''' __lowerCAmelCase: Union[str, Any] = "This is a test" __lowerCAmelCase: List[Any] = "This is a test" return input_text, output_text def lowercase_ ( self : int)-> Optional[int]: '''simple docstring''' __lowerCAmelCase: Dict = "<s>" __lowerCAmelCase: Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase__) , UpperCAmelCase__) def lowercase_ ( self : Optional[int])-> Dict: '''simple docstring''' __lowerCAmelCase: Any = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<unk>") self.assertEqual(vocab_keys[1] , "<s>") self.assertEqual(vocab_keys[-1] , "j") self.assertEqual(len(UpperCAmelCase__) , 2_0_0_0) def lowercase_ ( self : int)-> Dict: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0) def lowercase_ ( self : List[str])-> List[Any]: '''simple docstring''' __lowerCAmelCase: List[Any] = 
GPTSwaTokenizer(UpperCAmelCase__) __lowerCAmelCase: Tuple = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2]) __lowerCAmelCase: Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") # fmt: off self.assertListEqual( UpperCAmelCase__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on __lowerCAmelCase: int = tokenizer.convert_tokens_to_ids(UpperCAmelCase__) self.assertListEqual( UpperCAmelCase__ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , ) __lowerCAmelCase: Optional[int] = tokenizer.convert_ids_to_tokens(UpperCAmelCase__) # fmt: off self.assertListEqual( UpperCAmelCase__ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]) # fmt: on def lowercase_ ( self : str)-> List[Any]: '''simple docstring''' __lowerCAmelCase: Tuple = GPTSwaTokenizer(UpperCAmelCase__) __lowerCAmelCase: Union[str, Any] = ["This is a test", "I was born in 92000, and this is falsé."] __lowerCAmelCase: Optional[Any] = [ [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2], [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertListEqual(tokenizer.encode_fast(UpperCAmelCase__) , UpperCAmelCase__) # Test that decode_fast returns the input text for text, token_ids in zip(UpperCAmelCase__ , UpperCAmelCase__): self.assertEqual(tokenizer.decode_fast(UpperCAmelCase__) , 
UpperCAmelCase__) @slow def lowercase_ ( self : Any)-> int: '''simple docstring''' __lowerCAmelCase: Dict = [ "<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. Cool", ] # fmt: off __lowerCAmelCase: Optional[int] = {"input_ids": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 
1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase__ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=UpperCAmelCase__ , )
346
import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase_ : '''simple docstring''' def __init__( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Dict=13 , UpperCAmelCase__ : Dict=7 , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : str=99 , UpperCAmelCase__ : Union[str, Any]=32 , UpperCAmelCase__ : Tuple=5 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=37 , UpperCAmelCase__ : Union[str, Any]="gelu" , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Optional[Any]=0.1 , UpperCAmelCase__ : Tuple=50 , UpperCAmelCase__ : Optional[int]=0.02 , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=None , ) ->Union[str, Any]: '''simple docstring''' A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = initializer_range A__ = use_labels A__ = scope def SCREAMING_SNAKE_CASE ( self : int) ->Any: '''simple docstring''' A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = None if self.use_input_mask: A__ = random_attention_mask([self.batch_size, self.seq_length]) if self.use_labels: A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) A__ = self.get_config() return config, input_ids, input_mask, 
token_labels def SCREAMING_SNAKE_CASE ( self : int) ->int: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.prepare_config_and_inputs() A__ = True A__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any] , ) ->Dict: '''simple docstring''' A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__) A__ = model(UpperCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Optional[Any] , ) ->Dict: '''simple docstring''' A__ = True A__ = BertGenerationEncoder(config=UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , 
encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , ) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[int] , ) ->Any: '''simple docstring''' A__ = True A__ = True A__ = BertGenerationDecoder(config=UpperCAmelCase__).to(UpperCAmelCase__).eval() # first forward pass A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , ) A__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A__ = ids_tensor((self.batch_size, 3) , config.vocab_size) A__ = ids_tensor((self.batch_size, 3) , vocab_size=2) # append to next input_ids and A__ = torch.cat([input_ids, next_tokens] , dim=-1) A__ = torch.cat([input_mask, next_mask] , dim=-1) A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] A__ = model( UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['''hidden_states'''][0] # select random slice A__ = ids_tensor((1,) , output_from_past.shape[-1]).item() A__ = output_from_no_past[:, -3:, random_slice_idx].detach() A__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test 
that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3)) def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] , *UpperCAmelCase__ : List[str] , ) ->List[Any]: '''simple docstring''' A__ = BertGenerationDecoder(UpperCAmelCase__) model.to(UpperCAmelCase__) model.eval() A__ = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]: '''simple docstring''' A__ , A__ , A__ , A__ = self.prepare_config_and_inputs() A__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () UpperCAmelCase__ = (BertGenerationDecoder,) if is_torch_available() else () UpperCAmelCase__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = BertGenerationEncoderTester(self) A__ = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->List[str]: '''simple docstring''' self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE ( self : List[Any]) ->int: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Optional[Any]: '''simple docstring''' A__ , A__ , A__ , A__ = self.model_tester.prepare_config_and_inputs() A__ = '''bert''' 
self.model_tester.create_and_check_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : int) ->Optional[int]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Dict) ->Union[str, Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() A__ = None self.model_tester.create_and_check_model_as_decoder( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , ) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]: '''simple docstring''' A__ = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*UpperCAmelCase__) @slow def SCREAMING_SNAKE_CASE ( self : Dict) ->List[Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') self.assertIsNotNone(UpperCAmelCase__) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Any) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 1_024]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]) 
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4)) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' A__ = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''') A__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]]) with torch.no_grad(): A__ = model(UpperCAmelCase__)[0] A__ = torch.Size([1, 8, 50_358]) self.assertEqual(output.shape , UpperCAmelCase__) A__ = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-4))
87
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _UpperCAmelCase ( UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = "naver-clova-ix/donut-base-finetuned-docvqa" __SCREAMING_SNAKE_CASE : int = ( "This is a tool that answers a question about an document (pdf). It takes an input named `document` which " "should be the document containing the information, as well as a `question` that is the question about the " "document. It returns a text that contains the answer to the question." ) __SCREAMING_SNAKE_CASE : Optional[Any] = "document_qa" __SCREAMING_SNAKE_CASE : Dict = AutoProcessor __SCREAMING_SNAKE_CASE : int = VisionEncoderDecoderModel __SCREAMING_SNAKE_CASE : Optional[Any] = ["image", "text"] __SCREAMING_SNAKE_CASE : Any = ["text"] def __init__( self , *lowercase_ , **lowercase_ ) -> List[str]: if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' 
) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ ) def a_ ( self , lowercase_ , lowercase_ ) -> Optional[int]: UpperCAmelCase = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' UpperCAmelCase = task_prompt.replace('{user_input}' , UpperCAmelCase__ ) UpperCAmelCase = self.pre_processor.tokenizer( UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors='pt' ).input_ids UpperCAmelCase = self.pre_processor(UpperCAmelCase__ , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def a_ ( self , lowercase_ ) -> Any: return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=UpperCAmelCase__ , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=UpperCAmelCase__ , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=UpperCAmelCase__ , ).sequences def a_ ( self , lowercase_ ) -> Dict: UpperCAmelCase = self.pre_processor.batch_decode(UpperCAmelCase__ )[0] UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) UpperCAmelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) UpperCAmelCase = re.sub(R'<.*?>' , '' , UpperCAmelCase__ , count=1 ).strip() # remove first task start token UpperCAmelCase = self.pre_processor.tokenajson(UpperCAmelCase__ ) return sequence["answer"]
373
import argparse
import json
import os
import time
import zipfile

from get_ci_error_statistics import download_artifact, get_artifacts_links

from transformers import logging


# Referenced as `logger` inside extract_warnings_from_single_artifact, so the
# original binding to `_lowerCamelCase` was renaming damage.
logger = logging.get_logger(__name__)


def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract the warnings matching `targets` from one downloaded CI artifact.

    Fixes renaming damage: the original declared both parameters as
    ``lowercase_`` (a SyntaxError) while the body used `artifact_path` and
    `targets`; the function name itself is grounded by its call site in
    extract_warnings() below.

    :param artifact_path: path to a .zip artifact (or a directory when run
        with --from_gh, where artifacts are pre-extracted)
    :param targets: warning category names (e.g. "DeprecationWarning") to keep
    :return: set of multi-line warning strings
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        # Accumulate the indented continuation lines of one warning record,
        # then keep the record only if it mentions a target category.
        for line in fp:
            if isinstance(line, bytes):
                # zipfile hands back bytes — assumes UTF-8 logs; TODO confirm
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # NOTE(review): `from_gh` is a module-level name bound in the __main__
    # section below — this function must only be called after that binding.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            # Best-effort: a corrupt artifact should not abort the whole run.
            logger.warning(
                f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."""
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract the selected warnings from all artifacts under `artifact_dir`.

    Name and parameters grounded by the __main__ call site
    ``extract_warnings(args.output_dir, args.targets)``.
    """
    selected_warnings = set()
    # With --from_gh the artifacts are plain directories rather than .zip files.
    paths = [
        os.path.join(artifact_dir, p)
        for p in os.listdir(artifact_dir)
        if (p.endswith(""".zip""") or from_gh)
    ]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings


if __name__ == "__main__":

    def list_str(values):
        # argparse `type=` hook: split a comma-separated option into a list.
        return values.split(""",""")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
    parser.add_argument(
        """--output_dir""",
        type=str,
        required=True,
        help="""Where to store the downloaded artifacts and other result files.""",
    )
    parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    # optional parameters
    parser.add_argument(
        """--targets""",
        default="""DeprecationWarning,UserWarning,FutureWarning""",
        type=list_str,
        help="""Comma-separated list of target warning(s) which we want to extract.""",
    )
    parser.add_argument(
        """--from_gh""",
        action="""store_true""",
        help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
    )

    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("""=""" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
87
0
import hashlib
import unittest

from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Stub standing in for PIL.Image when Pillow is not installed."""

        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image):
    """Return the md5 hex digest of the image's raw bytes (stable test fingerprint).

    Fixes the original's ``hashlib.mda`` (no such attribute → AttributeError),
    the unbound ``image`` parameter name, and the function name — the test
    below calls ``hashimage(...)``, which the damaged rename left undefined.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    """Pipeline tests for depth estimation.

    NOTE(review): in the original, all five methods shared one name
    (``snake_case``), so only the last survived and unittest discovered no
    tests; method names below are reconstructed from their decorators and
    bodies — confirm against the pipeline-test mixin conventions.
    """

    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        # assumes `processor` is the image processor here — TODO confirm
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
542
class UpperCamelCase_:  # Public class to implement a graph
    """Boolean grid graph; counts connected islands of 1-cells using 8-way adjacency.

    Fixes renaming damage in the original: all three methods were named
    ``SCREAMING_SNAKE_CASE`` (so later defs shadowed earlier ones) while their
    bodies called ``self.is_safe`` / ``self.diffs``, and ``__init__`` never
    actually stored ``row``/``col``/``graph`` on the instance even though the
    methods read ``self.ROW`` / ``self.COL`` / ``self.graph``.
    """

    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        # Grid dimensions and the cell matrix; attribute names match the
        # reads in is_safe()/count_islands() below.
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """Return True if (i, j) is in-bounds, unvisited, and a land cell."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """Depth-first flood fill marking every cell of the island containing (i, j)."""
        # Offsets of the 8 neighbouring cells, in coordinate order.
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        """Return the number of 8-connected components of 1-cells in the grid."""
        visited = [[False for _ in range(self.COL)] for _ in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
87
0
"""Lazy-import module initializer for the GPT-BigCode model family.

Fixes renaming damage in the original: the import-structure dict was bound to
``SCREAMING_SNAKE_CASE_`` and then clobbered by the modeling-class list, yet
``_LazyModule`` was called with a never-defined ``_import_structure``, and the
lazy module proxy was assigned to a throwaway name instead of ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule name -> public symbols it exports; consumed by _LazyModule below,
# so it must carry exactly this name.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is unavailable: simply do not register the modeling classes.
    pass
else:
    # Register the PyTorch modeling symbols only when torch is installed.
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access (standard transformers __init__ pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
517
"""Fetch the latest posts of a subreddit via Reddit's public JSON endpoint."""
from __future__ import annotations

import requests

# Fields Reddit returns for a post; anything else in ``wanted_data`` is
# rejected before any network call is made.
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Return post data for ``subreddit``.

    :param subreddit: subreddit to query
    :param limit: number of posts to fetch
    :param age: listing sort order: ``"new"``, ``"top"`` or ``"hot"``
    :param wanted_data: post fields to extract; all fields when empty
    :raises ValueError: if ``wanted_data`` contains a field Reddit does not return
    :raises requests.HTTPError: on HTTP 429 (rate limited)
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        # No field filter: return the raw child objects keyed by index.
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
87
0
"""Convert original Meta EnCodec checkpoints (24 kHz / 32 kHz / 48 kHz) into the
Hugging Face ``transformers`` EncodecModel format.

checkpoints downloaded from:
    https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
    https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
    https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
"""
import argparse

import torch

from transformers import (
    EncodecConfig,
    EncodecFeatureExtractor,
    EncodecModel,
    logging,
)

logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")

# Original-checkpoint key pattern -> HF key pattern. "*" stands for the layer
# index and is substituted per weight in recursively_load_weights().
MAPPING_QUANTIZER = {
    "quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
    "quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
    "quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
    "quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
MAPPING_ENCODER = {
    "encoder.model.0.conv.conv": "encoder.layers.0.conv",
    "encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
    "encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
    "encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
    "encoder.model.3.conv.conv": "encoder.layers.3.conv",
    "encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
    "encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
    "encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
    "encoder.model.6.conv.conv": "encoder.layers.6.conv",
    "encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
    "encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
    "encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
    "encoder.model.9.conv.conv": "encoder.layers.9.conv",
    "encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
    "encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
    "encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
    "encoder.model.12.conv.conv": "encoder.layers.12.conv",
    "encoder.model.13.lstm": "encoder.layers.13.lstm",
    "encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
# Extra norm layers that only exist in the 48 kHz (time_group_norm) variant.
MAPPING_ENCODER_48K = {
    "encoder.model.0.conv.norm": "encoder.layers.0.norm",
    "encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
    "encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
    "encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
    "encoder.model.3.conv.norm": "encoder.layers.3.norm",
    "encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
    "encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
    "encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
    "encoder.model.6.conv.norm": "encoder.layers.6.norm",
    "encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
    "encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
    "encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
    "encoder.model.9.conv.norm": "encoder.layers.9.norm",
    "encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
    "encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
    "encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
    "encoder.model.12.conv.norm": "encoder.layers.12.norm",
    "encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
MAPPING_DECODER = {
    "decoder.model.0.conv.conv": "decoder.layers.0.conv",
    "decoder.model.1.lstm": "decoder.layers.1.lstm",
    "decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
    "decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
    "decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
    "decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
    "decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
    "decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
    "decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
    "decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
    "decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
    "decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
    "decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
    "decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
    "decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
    "decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
    "decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
    "decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
    "decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
MAPPING_DECODER_48K = {
    "decoder.model.0.conv.norm": "decoder.layers.0.norm",
    "decoder.model.3.convtr.norm": "decoder.layers.3.norm",
    "decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
    "decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
    "decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
    "decoder.model.6.convtr.norm": "decoder.layers.6.norm",
    "decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
    "decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
    "decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
    "decoder.model.9.convtr.norm": "decoder.layers.9.norm",
    "decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
    "decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
    "decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
    "decoder.model.12.convtr.norm": "decoder.layers.12.norm",
    "decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
    "decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
    "decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
    "decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
MAPPING_24K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
}
MAPPING_48K = {
    **MAPPING_QUANTIZER,
    **MAPPING_ENCODER,
    **MAPPING_ENCODER_48K,
    **MAPPING_DECODER,
    **MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``key`` ("a.b.c") inside ``hf_pointer`` and copy ``value`` into the
    attribute named by ``weight_type`` (or the pointer itself when None),
    validating shapes first."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')


def should_ignore(name, ignore_keys):
    """Return True when ``name`` matches any ignore pattern ("x.*" prefix,
    "a.*.b" prefix+suffix, or plain substring)."""
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(orig_dict, hf_model, model_name):
    """Copy every weight of the original state dict into ``hf_model`` using the
    key mappings, logging anything that could not be matched."""
    unused_weights = []

    # was `model_name == "encodec_24khz" or "encodec_32khz"` — a non-empty
    # string literal is always truthy, so the 48 kHz branch was unreachable.
    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    # Layer index sits just before the matched fragment.
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)

                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """Build an EncodecModel for ``model_name``, load the original checkpoint
    into it, save model + feature extractor, and optionally push to the hub."""
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model",
        default="encodec_24khz",
        type=str,
        help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_checkpoint(
        args.model,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
432
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 
45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 
76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 
45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) 
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = {"""configuration_plbart""": ["""PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PLBartConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""PLBartTokenizer"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """PLBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """PLBartForCausalLM""", """PLBartForConditionalGeneration""", """PLBartForSequenceClassification""", """PLBartModel""", """PLBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
88
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
1
"""Arrow-to-JAX formatter for `datasets` (reconstructed from mangled source)."""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter

if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

# Lazily-built map: device string identifier -> `jaxlib.xla_extension.Device`.
# Global because Device objects are not serializable with `pickle`/`dill`,
# so the formatter stores only the string and resolves it through this map.
DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Format Arrow rows/columns/batches as JAX arrays placed on a chosen device.

    Args:
        features: optional `Features` forwarded to the base formatter.
        device: device *string identifier* (never a Device object, which is
            not serializable); defaults to `str(jax.devices()[0])`.
        **jnp_array_kwargs: extra kwargs forwarded to `jnp.array` (e.g. dtype).
    """

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        # Stack a list of same-shape/same-dtype jax arrays into one array;
        # otherwise return the list untouched (ragged / mixed content).
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORch_AVAILABLE if False else config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
88
"""Sigmoid and SiLU (sigmoid linear unit) activation functions."""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid 1 / (1 + e^-x) of *vector*."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise SiLU (swish) activation x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
88
1
"""Lazy-import init module for the WavLM model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule -> public names, consumed by `_LazyModule` below.
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch-backed models are simply not exported when torch is missing
else:
    _import_structure["modeling_wavlm"] = [
        "WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "WavLMForAudioFrameClassification",
        "WavLMForCTC",
        "WavLMForSequenceClassification",
        "WavLMForXVector",
        "WavLMModel",
        "WavLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
88
"""CLI entry point that runs the TensorFlow benchmark suite."""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """Parse CLI arguments into `TensorFlowBenchmarkArguments` and run the benchmark.

    Deprecated ``--no_<flag>`` options are translated into a helpful error
    message pointing at the new ``--no-<flag>`` spelling; genuinely unknown
    arguments are reported as-is.

    Raises:
        ValueError: if deprecated or unknown arguments are passed.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        # The parser's error message ends with the list of offending args.
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()


if __name__ == "__main__":
    main()
88
1
"""Sigmoid and SiLU (sigmoid linear unit) activation functions."""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise logistic sigmoid 1 / (1 + e^-x) of *vector*."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Return the element-wise SiLU (swish) activation x * sigmoid(x)."""
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
88
"""I-BERT model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration for I-BERT (integer-only BERT) models.

    Mirrors the RoBERTa configuration, with two quantization-specific knobs:
    ``quant_mode`` toggles integer-only inference and ``force_dequant``
    selectively dequantizes specific ops ("none", "gelu", "softmax",
    "layernorm" or "nonlinear").
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for ONNX export; multiple-choice adds a choice axis."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
88
1
"""Wav2Vec2 model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    """Configuration class for Wav2Vec2 models.

    Groups the parameters for the convolutional feature extractor
    (``conv_*``), the transformer encoder, SpecAugment masking
    (``mask_time_*`` / ``mask_feature_*``), codevector quantization used for
    pretraining, the CTC head, the optional adapter, and the XVector head.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Copy to lists so the stored config is JSON-serializable and mutable.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
88
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
1
"""Tests for `datasets.io.text.TextDatasetReader`."""
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Shared assertions for a single Dataset loaded from the 4-line text fixture."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict loaded from the 4-line text fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
88
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : __UpperCAmelCase = XGLMConfig __UpperCAmelCase = {} __UpperCAmelCase = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]: _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Union[str, Any] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : int = ffn_dim _lowerCamelCase : str = activation_function _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Tuple = attention_dropout _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = 2 _lowerCamelCase : str = 1 def UpperCamelCase_ ( self) -> 
int: return XGLMConfig.from_pretrained("""facebook/xglm-564M""") def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3) _lowerCamelCase : str = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Tuple = self.get_config() _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False 
def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> Dict: self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self) -> List[Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""") def UpperCamelCase_ ( self) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") tf.random.set_seed(0) _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""") _lowerCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0"""): _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0]) _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : List[Any] = """left""" # use different length sentences to test batching _lowerCamelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = inputs["""input_ids"""] _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12) _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
1
"""Transformer building blocks: attention block, feed-forward and adaptive norms."""
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    """Self-attention + optional cross-attention + feed-forward block.

    Each of the three sub-blocks has its own normalization layer; the norm
    flavor is selected by `norm_type` ("layer_norm", "ada_norm", "ada_norm_zero").
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be"
                    f" divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when"
                    " calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    """Projection -> activation -> dropout -> projection feed-forward network."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    """Linear projection followed by GELU (with optional tanh approximation)."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    """Gated GELU: the projection doubles the width, half of it gates the other."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    """Fast sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """LayerNorm whose scale/shift are predicted from a timestep embedding."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """Adaptive LayerNorm that also emits gates for attention and MLP branches."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    """GroupNorm whose affine parameters are predicted from an embedding."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
88
"""Check whether two strings are anagrams of each other."""
from collections import defaultdict


def check_anagrams(first_str: str, second_str: str) -> bool:
    """Return True if the two strings are anagrams, ignoring case and spaces.

    >>> check_anagrams("Silent", "Listen")
    True
    >>> check_anagrams("There", "Their")
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in input strings,
    # increment count in the corresponding
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    # Anagrams cancel out exactly: every per-character balance is zero.
    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
88
1
"""Sum of the decimal digits of num! (Project Euler problem 20 style)."""


def factorial(num: int) -> int:
    """Return num! computed iteratively."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Return the sum of the decimal digits of `number`."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (defaults to 100!).

    >>> solution(10)
    27
    """
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
88
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
1
"""Bead sort (gravity sort) — works only on non-negative integers."""


def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers in place and return it.

    Adjacent elements repeatedly exchange their difference ("beads fall"),
    which sorts the list after at most len(sequence) passes.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                # Let the excess beads fall from the upper rod to the lower one.
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
88
"""In-place quicksort with a randomly chosen pivot."""
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition a[left_index:right_index] around a[left_index].

    Elements smaller than the pivot end up to its left; returns the
    pivot's final index.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Place the pivot between the two partitions.
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort a[left:right] in place using a random pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main() -> None:
    """Read comma-separated integers from stdin, sort and print them."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
88
1
"""REALM model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/realm-cc-news-pretrained-embedder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-encoder": (
        "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
    ),
    "google/realm-cc-news-pretrained-scorer": (
        "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
    ),
    # NOTE(review): "aresolve" below looks like a typo for "resolve" — confirm against the Hub.
    "google/realm-cc-news-pretrained-openqa": (
        "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
    ),
    "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
    "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
    "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
    "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class RealmConfig(PretrainedConfig):
    """Configuration for REALM models (embedder/encoder/scorer/reader/retriever).

    All arguments default to the values of the released REALM checkpoints;
    extra keyword arguments are forwarded to `PretrainedConfig`.
    """

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,  # 288 + 32 question tokens in the original setup — TODO confirm
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
88
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
1
"""simple docstring""" from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , ) -> Dict: _lowerCamelCase : int = parent _lowerCamelCase : Union[str, Any] = 13 _lowerCamelCase : List[str] = 7 _lowerCamelCase : Optional[int] = True _lowerCamelCase : Dict = True _lowerCamelCase : Any = False _lowerCamelCase : List[Any] = True _lowerCamelCase : Tuple = 99 _lowerCamelCase : Optional[Any] = 32 _lowerCamelCase : int = 2 _lowerCamelCase : Dict = 4 _lowerCamelCase : List[Any] = 37 _lowerCamelCase : Optional[int] = """gelu""" _lowerCamelCase : str = 0.1 _lowerCamelCase : Optional[Any] = 0.1 _lowerCamelCase : int = 512 _lowerCamelCase : Optional[Any] = 16 _lowerCamelCase : Tuple = 2 _lowerCamelCase : str = 0.02 _lowerCamelCase : int = 3 _lowerCamelCase : Optional[int] = 4 _lowerCamelCase : Union[str, Any] = None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : List[str] = None if self.use_input_mask: _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Optional[int] = None _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None if self.use_labels: _lowerCamelCase : Optional[Any] = 
ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices) _lowerCamelCase : Optional[int] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : List[Any] = TFDistilBertModel(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} _lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = [input_ids, input_mask] _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]: _lowerCamelCase : List[str] = TFDistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} _lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : str = TFDistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = { """input_ids""": input_ids, """attention_mask""": input_mask, } _lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : Optional[Any] = self.num_labels _lowerCamelCase : Any = TFDistilBertForSequenceClassification(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask} _lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : Dict = self.num_choices _lowerCamelCase : int = TFDistilBertForMultipleChoice(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1)) _lowerCamelCase : List[str] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1)) _lowerCamelCase : Tuple = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, } _lowerCamelCase : Any = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : 
str = self.num_labels _lowerCamelCase : int = TFDistilBertForTokenClassification(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask} _lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : int = self.prepare_config_and_inputs() ((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) : List[str] = config_and_inputs _lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = ( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) __UpperCAmelCase = ( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : str = TFDistilBertModelTester(self) _lowerCamelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , dim=37) def UpperCamelCase_ ( self) -> List[Any]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[str]: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]): _lowerCamelCase : Tuple = TFDistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Union[str, Any] = TFDistilBertModel.from_pretrained("""distilbert-base-uncased""") _lowerCamelCase : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]]) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)[0] _lowerCamelCase : str = [1, 6, 768] self.assertEqual(output.shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = tf.constant( [ [ [0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99], [0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04], [0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67], ] ]) tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4)
88
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCAmelCase = { """configuration_longt5""": ["""LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongT5Config""", """LongT5OnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongT5EncoderModel""", """LongT5ForConditionalGeneration""", """LongT5Model""", """LongT5PreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """FlaxLongT5ForConditionalGeneration""", """FlaxLongT5Model""", """FlaxLongT5PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowercase__ ( A_ ): __UpperCAmelCase = '''rwkv''' __UpperCAmelCase = {'''max_position_embeddings''': '''context_length'''} def __init__( self , SCREAMING_SNAKE_CASE=5_0277 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: _lowerCamelCase : List[str] = vocab_size _lowerCamelCase : Any = context_length _lowerCamelCase : str = hidden_size _lowerCamelCase : Union[str, Any] = num_hidden_layers _lowerCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size 
_lowerCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size _lowerCamelCase : int = layer_norm_epsilon _lowerCamelCase : int = rescale_every _lowerCamelCase : Optional[int] = use_cache _lowerCamelCase : Dict = bos_token_id _lowerCamelCase : List[Any] = eos_token_id super().__init__( tie_word_embeddings=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
1
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowercase__ ( A_ ): @slow @require_torch def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : int = EncoderDecoderModel.from_encoder_decoder_pretrained("""prajjwal1/bert-tiny""" , """prajjwal1/bert-tiny""") _lowerCamelCase : Dict = BertTokenizer.from_pretrained("""bert-base-uncased""") _lowerCamelCase : Tuple = bertabert.config.encoder.vocab_size _lowerCamelCase : int = tokenizer.sep_token_id _lowerCamelCase : List[str] = tokenizer.cls_token_id _lowerCamelCase : Optional[int] = 128 _lowerCamelCase : int = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""train[:1%]""") _lowerCamelCase : Union[str, Any] = datasets.load_dataset("""cnn_dailymail""" , """3.0.0""" , split="""validation[:1%]""") _lowerCamelCase : List[Any] = train_dataset.select(range(32)) _lowerCamelCase : Optional[int] = val_dataset.select(range(16)) _lowerCamelCase : Any = 4 def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE): # Tokenizer will automatically set [BOS] <text> [EOS] _lowerCamelCase : List[Any] = tokenizer(batch["""article"""] , padding="""max_length""" , truncation=SCREAMING_SNAKE_CASE , max_length=512) _lowerCamelCase : Any = tokenizer(batch["""highlights"""] , padding="""max_length""" , truncation=SCREAMING_SNAKE_CASE , max_length=128) _lowerCamelCase : Optional[Any] = inputs.input_ids _lowerCamelCase : Optional[int] = inputs.attention_mask _lowerCamelCase : Union[str, Any] = outputs.input_ids _lowerCamelCase : str = outputs.input_ids.copy() _lowerCamelCase : List[Any] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["""labels"""] ] _lowerCamelCase : Dict = outputs.attention_mask assert 
all(len(SCREAMING_SNAKE_CASE) == 512 for x in inputs.input_ids) assert all(len(SCREAMING_SNAKE_CASE) == 128 for x in outputs.input_ids) return batch def _compute_metrics(SCREAMING_SNAKE_CASE): _lowerCamelCase : Any = pred.label_ids _lowerCamelCase : Optional[int] = pred.predictions # all unnecessary tokens are removed _lowerCamelCase : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = sum([int(pred_str[i] == label_str[i]) for i in range(len(SCREAMING_SNAKE_CASE))]) / len(SCREAMING_SNAKE_CASE) return {"accuracy": accuracy} # map train dataset _lowerCamelCase : str = train_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , ) train_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) # same for validation dataset _lowerCamelCase : Union[str, Any] = val_dataset.map( _map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["""article""", """highlights"""] , ) val_dataset.set_format( type="""torch""" , columns=["""input_ids""", """attention_mask""", """decoder_input_ids""", """decoder_attention_mask""", """labels"""] , ) _lowerCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() _lowerCamelCase : Union[str, Any] = SeqaSeqTrainingArguments( output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="""steps""" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer _lowerCamelCase : 
Optional[Any] = SeqaSeqTrainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , ) # start training trainer.train()
88
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
1
"""
Multi-heuristic A* search on an n x n grid: one consistent (anchor)
heuristic plus several inconsistent ones, each driving its own open list.

Reconstructed from a whitespace-flattened dump in which every definition
shared a single mangled name; identifiers were restored from the call
sites.  Left-hand names that the dump destroyed are marked with
NOTE(review) comments where the reconstruction is uncertain.
"""
import heapq
import sys

import numpy as np

TPos = tuple[int, int]


class PriorityQueue:
    """Min-heap keyed by priority, with O(1) membership via a companion set."""

    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        """Smallest stored priority, or +inf when the queue is empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert ``item``; if already present, replace its priority."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # Update: pop entries until the stale one surfaces, then re-push
            # everything with the new priority for ``item``.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Delete ``item`` from the heap if present (linear rebuild)."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Peek at the item with the smallest priority."""
        return self.elements[0][1]

    def get(self):
        """Pop and return ``(priority, item)`` with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)


def consistent_heuristic(p: TPos, goal: TPos):
    # Euclidean distance (the consistent anchor heuristic).
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # Inconsistent heuristic: Euclidean distance floor-divided by the global
    # expansion counter ``t`` (so it decays as the search progresses).
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # Manhattan distance.
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    """Weighted A* key for open list ``i``: g(start) + W1 * h_i(start, goal)."""
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans


def do_something(back_pointer, goal, start):
    """Render the grid with the found path to stdout, then exit the program."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()


def valid(p: TPos):
    """Return True when ``p`` lies inside the n x n grid."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True


def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    """Relax the four neighbours of ``s`` and requeue them as appropriate."""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                    if neighbours not in close_list_inad:
                        for var in range(1, n_heuristic):
                            if key(neighbours, var, goal, g_function) <= W2 * key(
                                neighbours, 0, goal, g_function
                            ):
                                open_list[j].put(
                                    neighbours, key(neighbours, var, goal, g_function)
                                )


def make_common_ground():
    """Build the list of blocked cells used by the demo grid."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list


heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

blocks_blk = [
    (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1),
    (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1),
    (17, 1), (18, 1), (19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk

# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1


def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # NOTE(review): the dump unpacked two values here, but
                    # top_show() returns a single item; a single binding is
                    # used — verify against the upstream source.
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")


if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
88
"""Tests for the Stable Diffusion pipeline with Self-Attention Guidance (SAG).

Reconstructed from a whitespace-flattened dump: every class and method
shared one mangled identifier, so names were restored to conventional
ones, and boolean/None argument values the dump elided were restored from
the upstream diffusers test suite — verify against it before relying on
them.
"""
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny UNet/VAE/CLIP stack so the fast test runs on CPU."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        # NOTE(review): the boolean values below were lost by the dump and
        # restored from upstream (both False) — verify.
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; MPS needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
88
1
"""Create the photographic negative of an image (OpenCV demo).

The flattened original imported ``cva`` (the dump's mangling of ``cv2``)
at module level — which fails at import time — and its ``__main__`` block
called an undefined ``convert_to_negative``.  The import is now deferred
to the script entry point and the function carries its real name.
"""


def convert_to_negative(img):
    """Replace every channel value v of ``img`` with 255 - v, in place.

    ``img`` must be a numpy array (as returned by ``cv2.imread``); the
    vectorized form replaces the original pixel-by-pixel Python loop and
    works for grayscale as well as multi-channel images.  The mutated
    array is also returned for convenience, matching the original.
    """
    img[:] = 255 - img
    return img


if __name__ == "__main__":
    from cv2 import destroyAllWindows, imread, imshow, waitKey

    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
88
"""Tests for the PyTorch Mask2Former model.

Reconstructed from a whitespace-flattened dump: ``MaskaFormer*`` is the
dump's mangling of ``Mask2Former*`` (digits replaced by ``a``), and every
method shared a single mangled name.  Method names were restored from
their call sites; left-hand attribute names and boolean literals the dump
destroyed were restored from the upstream transformers test suite and are
flagged with NOTE(review) — verify before relying on them.
"""
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image


class Mask2FormerModelTester:
    """Builds a tiny Mask2Former config plus random inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        # NOTE(review): the attribute names below were lost by the dump
        # (only the assigned values survived) and were restored from the
        # upstream Mask2Former test file — verify.
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            # NOTE(review): the keyword value was lost by the dump; upstream
            # passes True here — verify.
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            outputs = model(pixel_values)

        comm_check_on_output(outputs)

        outputs = model(
            pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
        )

        comm_check_on_output(outputs)

        self.parent.assertTrue(outputs.loss is not None)
        self.parent.assertEqual(outputs.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        # only Mask2FormerForUniversalSegmentation has a loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
88
1
"""simple docstring""" import argparse import os import re UpperCAmelCase = """src/transformers""" # Pattern that looks at the indentation in a line. UpperCAmelCase = re.compile(r"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. UpperCAmelCase = re.compile(r"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. UpperCAmelCase = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. UpperCAmelCase = re.compile(r"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. UpperCAmelCase = re.compile(r"""\[([^\]]+)\]""") def _snake_case ( __snake_case : Any ): """simple docstring""" _lowerCamelCase : Tuple = _re_indent.search(__snake_case ) return "" if search is None else search.groups()[0] def _snake_case ( __snake_case : List[Any] , __snake_case : Any="" , __snake_case : Tuple=None , __snake_case : List[str]=None ): """simple docstring""" _lowerCamelCase : List[str] = 0 _lowerCamelCase : Tuple = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(__snake_case ): index += 1 _lowerCamelCase : Union[str, Any] = ["""\n""".join(lines[:index] )] else: _lowerCamelCase : Any = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). 
_lowerCamelCase : Optional[int] = [lines[index]] index += 1 while index < len(__snake_case ) and (end_prompt is None or not lines[index].startswith(__snake_case )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(__snake_case ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(__snake_case ) ) if index < len(__snake_case ) - 1: _lowerCamelCase : Optional[int] = [lines[index + 1]] index += 1 else: _lowerCamelCase : int = [] else: blocks.append("""\n""".join(__snake_case ) ) _lowerCamelCase : Dict = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(__snake_case ) > 0: blocks.append("""\n""".join(__snake_case ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(__snake_case ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def _snake_case ( __snake_case : Tuple ): """simple docstring""" def _inner(__snake_case : List[str] ): return key(__snake_case ).lower().replace("""_""" , """""" ) return _inner def _snake_case ( __snake_case : Optional[int] , __snake_case : Optional[int]=None ): """simple docstring""" def noop(__snake_case : List[str] ): return x if key is None: _lowerCamelCase : Dict = noop # Constants are all uppercase, they go first. _lowerCamelCase : List[Any] = [obj for obj in objects if key(__snake_case ).isupper()] # Classes are not all uppercase but start with a capital, they go second. _lowerCamelCase : Any = [obj for obj in objects if key(__snake_case )[0].isupper() and not key(__snake_case ).isupper()] # Functions begin with a lowercase, they go last. 
_lowerCamelCase : Union[str, Any] = [obj for obj in objects if not key(__snake_case )[0].isupper()] _lowerCamelCase : List[Any] = ignore_underscore(__snake_case ) return sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) + sorted(__snake_case , key=__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" def _replace(__snake_case : Union[str, Any] ): _lowerCamelCase : Any = match.groups()[0] if "," not in imports: return F'[{imports}]' _lowerCamelCase : Optional[Any] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: _lowerCamelCase : Dict = keys[:-1] return "[" + ", ".join([F'"{k}"' for k in sort_objects(__snake_case )] ) + "]" _lowerCamelCase : Tuple = import_statement.split("""\n""" ) if len(__snake_case ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. _lowerCamelCase : Union[str, Any] = 2 if lines[1].strip() == """[""" else 1 _lowerCamelCase : Optional[Any] = [(i, _re_strip_line.search(__snake_case ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] _lowerCamelCase : Optional[Any] = sort_objects(__snake_case , key=lambda __snake_case : x[1] ) _lowerCamelCase : str = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(__snake_case ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: _lowerCamelCase : str = _re_bracket_content.sub(_replace , lines[1] ) else: _lowerCamelCase : Optional[Any] = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. 
if len(keys[-1] ) == 0: _lowerCamelCase : str = keys[:-1] _lowerCamelCase : Optional[int] = get_indent(lines[1] ) + """, """.join([F'"{k}"' for k in sort_objects(__snake_case )] ) return "\n".join(__snake_case ) else: # Finally we have to deal with imports fitting on one line _lowerCamelCase : Optional[Any] = _re_bracket_content.sub(_replace , __snake_case ) return import_statement def _snake_case ( __snake_case : List[str] , __snake_case : Dict=True ): """simple docstring""" with open(__snake_case , encoding="""utf-8""" ) as f: _lowerCamelCase : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 _lowerCamelCase : Optional[int] = split_code_in_indented_blocks( __snake_case , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(__snake_case ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. _lowerCamelCase : Union[str, Any] = main_blocks[block_idx] _lowerCamelCase : List[Any] = block.split("""\n""" ) # Get to the start of the imports. _lowerCamelCase : List[str] = 0 while line_idx < len(__snake_case ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: _lowerCamelCase : Tuple = len(__snake_case ) else: line_idx += 1 if line_idx >= len(__snake_case ): continue # Ignore beginning and last line: they don't contain anything. _lowerCamelCase : List[str] = """\n""".join(block_lines[line_idx:-1] ) _lowerCamelCase : Union[str, Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. 
_lowerCamelCase : Optional[int] = split_code_in_indented_blocks(__snake_case , indent_level=__snake_case ) # We have two categories of import key: list or _import_structure[key].append/extend _lowerCamelCase : List[str] = _re_direct_key if """_import_structure = {""" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. _lowerCamelCase : List[Any] = [(pattern.search(__snake_case ).groups()[0] if pattern.search(__snake_case ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. _lowerCamelCase : Union[str, Any] = [(i, key) for i, key in enumerate(__snake_case ) if key is not None] _lowerCamelCase : Optional[Any] = [x[0] for x in sorted(__snake_case , key=lambda __snake_case : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. _lowerCamelCase : Tuple = 0 _lowerCamelCase : Dict = [] for i in range(len(__snake_case ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: _lowerCamelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(__snake_case ) count += 1 # And we put our main block back together with its first and last line. _lowerCamelCase : Tuple = """\n""".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(__snake_case ): if check_only: return True else: print(F'Overwriting {file}.' 
) with open(__snake_case , """w""" , encoding="""utf-8""" ) as f: f.write("""\n""".join(__snake_case ) ) def _snake_case ( __snake_case : int=True ): """simple docstring""" _lowerCamelCase : Dict = [] for root, _, files in os.walk(__snake_case ): if "__init__.py" in files: _lowerCamelCase : int = sort_imports(os.path.join(__snake_case , """__init__.py""" ) , check_only=__snake_case ) if result: _lowerCamelCase : str = [os.path.join(__snake_case , """__init__.py""" )] if len(__snake_case ) > 0: raise ValueError(F'Would overwrite {len(__snake_case )} files, run `make style`.' ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") UpperCAmelCase = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
88
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), 
("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", 
"""FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModel) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowercase__ ( _BaseAutoModelClass ): 
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
1
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
1
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class lowercase__ ( A_ ,A_ ): @register_to_config def __init__( self , SCREAMING_SNAKE_CASE = 768 , ) -> List[Any]: super().__init__() _lowerCamelCase : str = nn.Parameter(torch.zeros(1 , SCREAMING_SNAKE_CASE)) _lowerCamelCase : Optional[int] = nn.Parameter(torch.ones(1 , SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , ) -> List[str]: _lowerCamelCase : Dict = nn.Parameter(self.mean.to(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = nn.Parameter(self.std.to(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)) return self def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]: _lowerCamelCase : Optional[int] = (embeds - self.mean) * 1.0 / self.std return embeds def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict: _lowerCamelCase : int = (embeds * self.std) + self.mean return embeds
88
"""simple docstring""" def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[int] ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : int ): """simple docstring""" if curr_ind == len(__snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__snake_case ) ): if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ): # Insert current vertex into path as next transition _lowerCamelCase : List[str] = next_ver # Validate created path if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ): return True # Backtrack _lowerCamelCase : Tuple = -1 return False def _snake_case ( __snake_case : list[list[int]] , __snake_case : int = 0 ): """simple docstring""" _lowerCamelCase : Any = [-1] * (len(__snake_case ) + 1) # initialize start and end of path with starting index _lowerCamelCase : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__snake_case , __snake_case , 1 ) else []
88
1
"""simple docstring""" import os from pathlib import Path def _snake_case ( ): """simple docstring""" from torch.utils.cpp_extension import load _lowerCamelCase : Optional[Any] = Path(__snake_case ).resolve().parent.parent.parent / """kernels""" / """deformable_detr""" _lowerCamelCase : int = [ root / filename for filename in [ """vision.cpp""", os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ), os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ), ] ] load( """MultiScaleDeformableAttention""" , __snake_case , with_cuda=__snake_case , extra_include_paths=[str(__snake_case )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[ """-DCUDA_HAS_FP16=1""", """-D__CUDA_NO_HALF_OPERATORS__""", """-D__CUDA_NO_HALF_CONVERSIONS__""", """-D__CUDA_NO_HALF2_OPERATORS__""", ] , ) import MultiScaleDeformableAttention as MSDA return MSDA
88
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {"""vocab_file""": """spiece.model"""} UpperCAmelCase = { """vocab_file""": { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""", } } UpperCAmelCase = { """albert-base-v1""": 512, """albert-large-v1""": 512, """albert-xlarge-v1""": 512, """albert-xxlarge-v1""": 512, """albert-base-v2""": 512, """albert-large-v2""": 512, """albert-xlarge-v2""": 512, """albert-xxlarge-v2""": 512, } UpperCAmelCase = """▁""" class lowercase__ ( A_ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE = None , 
**SCREAMING_SNAKE_CASE , ) -> None: # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. _lowerCamelCase : Optional[int] = ( AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE , normalized=SCREAMING_SNAKE_CASE) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else mask_token ) _lowerCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Optional[int] = do_lower_case _lowerCamelCase : Any = remove_space _lowerCamelCase : Any = keep_accents _lowerCamelCase : str = vocab_file _lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(SCREAMING_SNAKE_CASE) @property def UpperCamelCase_ ( self) -> int: return len(self.sp_model) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> int: _lowerCamelCase : Any = self.__dict__.copy() _lowerCamelCase : Any = None return state def __setstate__( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]: _lowerCamelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : List[Any] = {} _lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def UpperCamelCase_ ( self , 
SCREAMING_SNAKE_CASE) -> List[Any]: if self.remove_space: _lowerCamelCase : Union[str, Any] = """ """.join(inputs.strip().split()) else: _lowerCamelCase : str = inputs _lowerCamelCase : Optional[Any] = outputs.replace("""``""" , """\"""").replace("""''""" , """\"""") if not self.keep_accents: _lowerCamelCase : Tuple = unicodedata.normalize("""NFKD""" , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = """""".join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE)]) if self.do_lower_case: _lowerCamelCase : Optional[Any] = outputs.lower() return outputs def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : str = self.preprocess_text(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE) _lowerCamelCase : str = [] for piece in pieces: if len(SCREAMING_SNAKE_CASE) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit(): _lowerCamelCase : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE , """""")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: _lowerCamelCase : int = cur_pieces[1:] else: _lowerCamelCase : Union[str, Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(SCREAMING_SNAKE_CASE) else: new_pieces.append(SCREAMING_SNAKE_CASE) return new_pieces def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict: return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]: return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : Tuple = """""" _lowerCamelCase : Tuple = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += 
self.sp_model.decode(SCREAMING_SNAKE_CASE) + token _lowerCamelCase : List[str] = True _lowerCamelCase : int = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = False out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) return out_string.strip() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : List[Any] = [self.sep_token_id] _lowerCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE) if token_ids_a is not None: return [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1] return [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1] def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : Optional[int] = [self.sep_token_id] _lowerCamelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return _lowerCamelCase : Optional[int] = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE) elif not 
os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE , """wb""") as fi: _lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE) return (out_vocab_file,)
88
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase__ ( A_ ): __UpperCAmelCase = '''facebook/bart-large-mnli''' __UpperCAmelCase = ( '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ''' '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. 
''' '''It returns the most likely label in the list of provided `labels` for the input text.''' ) __UpperCAmelCase = '''text_classifier''' __UpperCAmelCase = AutoTokenizer __UpperCAmelCase = AutoModelForSequenceClassification __UpperCAmelCase = ['''text''', ['''text''']] __UpperCAmelCase = ['''text'''] def UpperCamelCase_ ( self) -> Union[str, Any]: super().setup() _lowerCamelCase : List[Any] = self.model.config _lowerCamelCase : Union[str, Any] = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _lowerCamelCase : Tuple = int(SCREAMING_SNAKE_CASE) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any: _lowerCamelCase : Union[str, Any] = labels return self.pre_processor( [text] * len(SCREAMING_SNAKE_CASE) , [F'This example is {label}' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Any: _lowerCamelCase : int = outputs.logits _lowerCamelCase : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
88
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( __snake_case : List[str] ): """simple docstring""" for param in module.parameters(): _lowerCamelCase : Optional[Any] = False def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : Any = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def _snake_case ( __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : int = plt.imshow(__snake_case ) fig.axes.get_xaxis().set_visible(__snake_case ) fig.axes.get_yaxis().set_visible(__snake_case ) plt.show() def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Optional[Any] = current_time.strftime("""%H:%M:%S""" ) return timestamp
88
1
"""simple docstring""" from __future__ import annotations def _snake_case ( __snake_case : list[list[int]] ): """simple docstring""" _lowerCamelCase : Dict = len(__snake_case ) # We need to create solution object to save path. _lowerCamelCase : int = [[0 for _ in range(__snake_case )] for _ in range(__snake_case )] _lowerCamelCase : str = run_maze(__snake_case , 0 , 0 , __snake_case ) if solved: print("""\n""".join(str(__snake_case ) for row in solutions ) ) else: print("""No solution exists!""" ) return solved def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[list[int]] ): """simple docstring""" _lowerCamelCase : Optional[Any] = len(__snake_case ) # Final check point. if i == j == (size - 1): _lowerCamelCase : List[str] = 1 return True _lowerCamelCase : List[Any] = (not i < 0) and (not j < 0) # Check lower bounds _lowerCamelCase : Union[str, Any] = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. _lowerCamelCase : List[str] = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited _lowerCamelCase : Union[str, Any] = 1 # check for directions if ( run_maze(__snake_case , i + 1 , __snake_case , __snake_case ) or run_maze(__snake_case , __snake_case , j + 1 , __snake_case ) or run_maze(__snake_case , i - 1 , __snake_case , __snake_case ) or run_maze(__snake_case , __snake_case , j - 1 , __snake_case ) ): return True _lowerCamelCase : str = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
88
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
1
"""simple docstring""" import argparse import collections import json import os import re import string import sys import numpy as np UpperCAmelCase = re.compile(r"""\b(a|an|the)\b""", re.UNICODE) UpperCAmelCase = None def _snake_case ( ): """simple docstring""" _lowerCamelCase : List[Any] = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""" ) parser.add_argument("""data_file""" , metavar="""data.json""" , help="""Input data JSON file.""" ) parser.add_argument("""pred_file""" , metavar="""pred.json""" , help="""Model predictions.""" ) parser.add_argument( """--out-file""" , """-o""" , metavar="""eval.json""" , help="""Write accuracy metrics to file (default is stdout).""" ) parser.add_argument( """--na-prob-file""" , """-n""" , metavar="""na_prob.json""" , help="""Model estimates of probability of no answer.""" ) parser.add_argument( """--na-prob-thresh""" , """-t""" , type=__snake_case , default=1.0 , help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""" , ) parser.add_argument( """--out-image-dir""" , """-p""" , metavar="""out_images""" , default=__snake_case , help="""Save precision-recall curves to directory.""" ) parser.add_argument("""--verbose""" , """-v""" , action="""store_true""" ) if len(sys.argv ) == 1: parser.print_help() sys.exit(1 ) return parser.parse_args() def _snake_case ( __snake_case : Dict ): """simple docstring""" _lowerCamelCase : Dict = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _lowerCamelCase : Optional[Any] = bool(qa["""answers"""]["""text"""] ) return qid_to_has_ans def _snake_case ( __snake_case : Union[str, Any] ): """simple docstring""" def remove_articles(__snake_case : List[str] ): return ARTICLES_REGEX.sub(""" """ , __snake_case ) def white_space_fix(__snake_case : Optional[int] ): return " ".join(text.split() ) def remove_punc(__snake_case : Optional[Any] ): _lowerCamelCase : str = set(string.punctuation ) return "".join(ch for ch in text if 
ch not in exclude ) def lower(__snake_case : Tuple ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(__snake_case ) ) ) ) def _snake_case ( __snake_case : Any ): """simple docstring""" if not s: return [] return normalize_answer(__snake_case ).split() def _snake_case ( __snake_case : Dict , __snake_case : int ): """simple docstring""" return int(normalize_answer(__snake_case ) == normalize_answer(__snake_case ) ) def _snake_case ( __snake_case : Tuple , __snake_case : Any ): """simple docstring""" _lowerCamelCase : Optional[int] = get_tokens(__snake_case ) _lowerCamelCase : int = get_tokens(__snake_case ) _lowerCamelCase : Optional[Any] = collections.Counter(__snake_case ) & collections.Counter(__snake_case ) _lowerCamelCase : int = sum(common.values() ) if len(__snake_case ) == 0 or len(__snake_case ) == 0: # If either is no-answer, then F1 is 1 if they agree, 0 otherwise return int(gold_toks == pred_toks ) if num_same == 0: return 0 _lowerCamelCase : List[str] = 1.0 * num_same / len(__snake_case ) _lowerCamelCase : Dict = 1.0 * num_same / len(__snake_case ) _lowerCamelCase : int = (2 * precision * recall) / (precision + recall) return fa def _snake_case ( __snake_case : Optional[Any] , __snake_case : str ): """simple docstring""" _lowerCamelCase : List[Any] = {} _lowerCamelCase : Optional[int] = {} for article in dataset: for p in article["paragraphs"]: for qa in p["qas"]: _lowerCamelCase : Tuple = qa["""id"""] _lowerCamelCase : Optional[int] = [t for t in qa["""answers"""]["""text"""] if normalize_answer(__snake_case )] if not gold_answers: # For unanswerable questions, only correct answer is empty string _lowerCamelCase : List[str] = [""""""] if qid not in preds: print(F'Missing prediction for {qid}' ) continue _lowerCamelCase : Any = preds[qid] # Take max over all gold answers _lowerCamelCase : Tuple = max(compute_exact(__snake_case , __snake_case ) for a in gold_answers ) _lowerCamelCase : List[str] = max(compute_fa(__snake_case 
, __snake_case ) for a in gold_answers ) return exact_scores, fa_scores def _snake_case ( __snake_case : List[Any] , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = {} for qid, s in scores.items(): _lowerCamelCase : Optional[Any] = na_probs[qid] > na_prob_thresh if pred_na: _lowerCamelCase : Any = float(not qid_to_has_ans[qid] ) else: _lowerCamelCase : List[str] = s return new_scores def _snake_case ( __snake_case : Any , __snake_case : int , __snake_case : Any=None ): """simple docstring""" if not qid_list: _lowerCamelCase : int = len(__snake_case ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores.values() ) / total), ("""f1""", 100.0 * sum(fa_scores.values() ) / total), ("""total""", total), ] ) else: _lowerCamelCase : List[str] = len(__snake_case ) return collections.OrderedDict( [ ("""exact""", 100.0 * sum(exact_scores[k] for k in qid_list ) / total), ("""f1""", 100.0 * sum(fa_scores[k] for k in qid_list ) / total), ("""total""", total), ] ) def _snake_case ( __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : int ): """simple docstring""" for k in new_eval: _lowerCamelCase : Optional[int] = new_eval[k] def _snake_case ( __snake_case : Tuple , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Tuple ): """simple docstring""" plt.step(__snake_case , __snake_case , color="""b""" , alpha=0.2 , where="""post""" ) plt.fill_between(__snake_case , __snake_case , step="""post""" , alpha=0.2 , color="""b""" ) plt.xlabel("""Recall""" ) plt.ylabel("""Precision""" ) plt.xlim([0.0, 1.05] ) plt.ylim([0.0, 1.05] ) plt.title(__snake_case ) plt.savefig(__snake_case ) plt.clf() def _snake_case ( __snake_case : int , __snake_case : Dict , __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Union[str, Any]=None , __snake_case : List[Any]=None ): """simple docstring""" _lowerCamelCase : Any = sorted(__snake_case , key=lambda 
__snake_case : na_probs[k] ) _lowerCamelCase : List[Any] = 0.0 _lowerCamelCase : Optional[Any] = 1.0 _lowerCamelCase : Tuple = 0.0 _lowerCamelCase : Tuple = [1.0] _lowerCamelCase : List[Any] = [0.0] _lowerCamelCase : Optional[int] = 0.0 for i, qid in enumerate(__snake_case ): if qid_to_has_ans[qid]: true_pos += scores[qid] _lowerCamelCase : List[str] = true_pos / float(i + 1 ) _lowerCamelCase : Optional[int] = true_pos / float(__snake_case ) if i == len(__snake_case ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]: # i.e., if we can put a threshold after this point avg_prec += cur_p * (cur_r - recalls[-1]) precisions.append(__snake_case ) recalls.append(__snake_case ) if out_image: plot_pr_curve(__snake_case , __snake_case , __snake_case , __snake_case ) return {"ap": 100.0 * avg_prec} def _snake_case ( __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : Optional[Any] ): """simple docstring""" if out_image_dir and not os.path.exists(__snake_case ): os.makedirs(__snake_case ) _lowerCamelCase : str = sum(1 for v in qid_to_has_ans.values() if v ) if num_true_pos == 0: return _lowerCamelCase : List[str] = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_exact.png""" ) , title="""Precision-Recall curve for Exact Match score""" , ) _lowerCamelCase : int = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_f1.png""" ) , title="""Precision-Recall curve for F1 score""" , ) _lowerCamelCase : Union[str, Any] = {k: float(__snake_case ) for k, v in qid_to_has_ans.items()} _lowerCamelCase : Optional[Any] = make_precision_recall_eval( __snake_case , __snake_case , __snake_case , __snake_case , out_image=os.path.join(__snake_case , """pr_oracle.png""" ) , title="""Oracle Precision-Recall curve (binary task of 
HasAns vs. NoAns)""" , ) merge_eval(__snake_case , __snake_case , """pr_exact""" ) merge_eval(__snake_case , __snake_case , """pr_f1""" ) merge_eval(__snake_case , __snake_case , """pr_oracle""" ) def _snake_case ( __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : str , __snake_case : Optional[int] ): """simple docstring""" if not qid_list: return _lowerCamelCase : Tuple = [na_probs[k] for k in qid_list] _lowerCamelCase : int = np.ones_like(__snake_case ) / float(len(__snake_case ) ) plt.hist(__snake_case , weights=__snake_case , bins=20 , range=(0.0, 1.0) ) plt.xlabel("""Model probability of no-answer""" ) plt.ylabel("""Proportion of dataset""" ) plt.title(F'Histogram of no-answer probability: {name}' ) plt.savefig(os.path.join(__snake_case , F'na_prob_hist_{name}.png' ) ) plt.clf() def _snake_case ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : Any ): """simple docstring""" _lowerCamelCase : int = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] ) _lowerCamelCase : Optional[Any] = num_no_ans _lowerCamelCase : str = cur_score _lowerCamelCase : Dict = 0.0 _lowerCamelCase : Optional[int] = sorted(__snake_case , key=lambda __snake_case : na_probs[k] ) for i, qid in enumerate(__snake_case ): if qid not in scores: continue if qid_to_has_ans[qid]: _lowerCamelCase : List[str] = scores[qid] else: if preds[qid]: _lowerCamelCase : str = -1 else: _lowerCamelCase : List[str] = 0 cur_score += diff if cur_score > best_score: _lowerCamelCase : Any = cur_score _lowerCamelCase : Dict = na_probs[qid] return 100.0 * best_score / len(__snake_case ), best_thresh def _snake_case ( __snake_case : Union[str, Any] , __snake_case : str , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Dict ): """simple docstring""" _lowerCamelCase , _lowerCamelCase : str = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case ) _lowerCamelCase , 
_lowerCamelCase : List[Any] = find_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case ) _lowerCamelCase : Dict = best_exact _lowerCamelCase : Optional[int] = exact_thresh _lowerCamelCase : Any = best_fa _lowerCamelCase : Any = fa_thresh def _snake_case ( ): """simple docstring""" with open(OPTS.data_file ) as f: _lowerCamelCase : Union[str, Any] = json.load(__snake_case ) _lowerCamelCase : Dict = dataset_json["""data"""] with open(OPTS.pred_file ) as f: _lowerCamelCase : Union[str, Any] = json.load(__snake_case ) if OPTS.na_prob_file: with open(OPTS.na_prob_file ) as f: _lowerCamelCase : Any = json.load(__snake_case ) else: _lowerCamelCase : Optional[Any] = {k: 0.0 for k in preds} _lowerCamelCase : List[Any] = make_qid_to_has_ans(__snake_case ) # maps qid to True/False _lowerCamelCase : Union[str, Any] = [k for k, v in qid_to_has_ans.items() if v] _lowerCamelCase : Optional[int] = [k for k, v in qid_to_has_ans.items() if not v] _lowerCamelCase , _lowerCamelCase : Union[str, Any] = get_raw_scores(__snake_case , __snake_case ) _lowerCamelCase : Dict = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh ) _lowerCamelCase : List[str] = apply_no_ans_threshold(__snake_case , __snake_case , __snake_case , OPTS.na_prob_thresh ) _lowerCamelCase : Optional[Any] = make_eval_dict(__snake_case , __snake_case ) if has_ans_qids: _lowerCamelCase : Any = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case ) merge_eval(__snake_case , __snake_case , """HasAns""" ) if no_ans_qids: _lowerCamelCase : List[Any] = make_eval_dict(__snake_case , __snake_case , qid_list=__snake_case ) merge_eval(__snake_case , __snake_case , """NoAns""" ) if OPTS.na_prob_file: find_all_best_thresh(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) if OPTS.na_prob_file and OPTS.out_image_dir: run_precision_recall_analysis(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , 
OPTS.out_image_dir ) histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , """hasAns""" ) histogram_na_prob(__snake_case , __snake_case , OPTS.out_image_dir , """noAns""" ) if OPTS.out_file: with open(OPTS.out_file , """w""" ) as f: json.dump(__snake_case , __snake_case ) else: print(json.dumps(__snake_case , indent=2 ) ) if __name__ == "__main__": UpperCAmelCase = parse_args() if OPTS.out_image_dir: import matplotlib matplotlib.use("""Agg""") import matplotlib.pyplot as plt main()
88
"""simple docstring""" import numpy as np def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {"""vocab_file""": """spiece.model"""} UpperCAmelCase = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", } } UpperCAmelCase = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } # Segments (not really needed) UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 UpperCAmelCase = 3 UpperCAmelCase = 4 class lowercase__ ( A_ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = '''left''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<sep>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<cls>" , SCREAMING_SNAKE_CASE="<mask>" , SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it _lowerCamelCase : Any = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else mask_token _lowerCamelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Union[str, Any] = 3 _lowerCamelCase : Dict = do_lower_case _lowerCamelCase : str = remove_space _lowerCamelCase : Dict = keep_accents _lowerCamelCase : Optional[Any] = vocab_file _lowerCamelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(SCREAMING_SNAKE_CASE) @property def UpperCamelCase_ ( self) -> List[Any]: return len(self.sp_model) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Optional[int]: _lowerCamelCase : Dict = self.__dict__.copy() _lowerCamelCase : Optional[Any] = None return state def __setstate__( self , SCREAMING_SNAKE_CASE) -> Optional[Any]: _lowerCamelCase : str = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : Union[str, Any] = {} _lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: if self.remove_space: _lowerCamelCase : List[Any] = """ """.join(inputs.strip().split()) 
else: _lowerCamelCase : Dict = inputs _lowerCamelCase : Optional[int] = outputs.replace("""``""" , """\"""").replace("""''""" , """\"""") if not self.keep_accents: _lowerCamelCase : Optional[int] = unicodedata.normalize("""NFKD""" , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = """""".join([c for c in outputs if not unicodedata.combining(SCREAMING_SNAKE_CASE)]) if self.do_lower_case: _lowerCamelCase : Tuple = outputs.lower() return outputs def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : str = self.preprocess_text(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = [] for piece in pieces: if len(SCREAMING_SNAKE_CASE) > 1 and piece[-1] == str(""",""") and piece[-2].isdigit(): _lowerCamelCase : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(SCREAMING_SNAKE_CASE , """""")) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: _lowerCamelCase : List[str] = cur_pieces[1:] else: _lowerCamelCase : Tuple = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(SCREAMING_SNAKE_CASE) else: new_pieces.append(SCREAMING_SNAKE_CASE) return new_pieces def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: return self.sp_model.PieceToId(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int: return self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[Any]: _lowerCamelCase : Optional[int] = """""".join(SCREAMING_SNAKE_CASE).replace(SCREAMING_SNAKE_CASE , """ """).strip() return out_string def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> str: _lowerCamelCase : Optional[int] = kwargs.pop("""use_source_tokenizer""" , SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = 
self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _lowerCamelCase : Optional[int] = [] _lowerCamelCase : str = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE)) _lowerCamelCase : int = [] sub_texts.append(SCREAMING_SNAKE_CASE) else: current_sub_text.append(SCREAMING_SNAKE_CASE) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE)) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _lowerCamelCase : Optional[int] = """""".join(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _lowerCamelCase : Tuple = self.clean_up_tokenization(SCREAMING_SNAKE_CASE) return clean_text else: return text def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : int = [self.sep_token_id] _lowerCamelCase : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE) if token_ids_a is not None: return ([0] * len(SCREAMING_SNAKE_CASE)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1, 1] return ([0] * 
len(SCREAMING_SNAKE_CASE)) + [1, 1] def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : Union[str, Any] = [self.sep_token_id] _lowerCamelCase : List[str] = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]: if not os.path.isdir(SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) should be a directory') return _lowerCamelCase : Optional[Any] = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE) elif not os.path.isfile(self.vocab_file): with open(SCREAMING_SNAKE_CASE , """wb""") as fi: _lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE) return (out_vocab_file,)
88
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = HfArgumentParser(__snake_case ) _lowerCamelCase : int = parser.parse_args_into_dataclasses()[0] _lowerCamelCase : Dict = TensorFlowBenchmark(args=__snake_case ) try: _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: _lowerCamelCase : Union[str, Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" _lowerCamelCase : List[str] = """ """.join(str(__snake_case ).split(""" """ )[:-1] ) _lowerCamelCase : Dict = """""" _lowerCamelCase : List[Any] = eval(str(__snake_case ).split(""" """ )[-1] ) _lowerCamelCase : Tuple = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__snake_case ) if len(__snake_case ) > 0: _lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(__snake_case ) raise ValueError(__snake_case ) benchmark.run() if __name__ == "__main__": main()
88
1
"""simple docstring""" def _snake_case ( __snake_case : int ): """simple docstring""" if divisor % 5 == 0 or divisor % 2 == 0: return 0 _lowerCamelCase : Union[str, Any] = 1 _lowerCamelCase : Optional[int] = 1 while repunit: _lowerCamelCase : Optional[int] = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def _snake_case ( __snake_case : int = 1000000 ): """simple docstring""" _lowerCamelCase : Dict = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(__snake_case ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(f'''{solution() = }''')
88
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class lowercase__ ( A_ ): __UpperCAmelCase = '''ibert''' def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="none" , **SCREAMING_SNAKE_CASE , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Dict = type_vocab_size _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Dict = layer_norm_eps _lowerCamelCase : List[Any] = position_embedding_type 
_lowerCamelCase : Any = quant_mode _lowerCamelCase : List[str] = force_dequant class lowercase__ ( A_ ): @property def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCamelCase : Optional[int] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
88
1
"""simple docstring""" from __future__ import annotations UpperCAmelCase = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : list[int] , __snake_case : int , __snake_case : list[list[int]] , ): """simple docstring""" _lowerCamelCase : Optional[int] = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the reference grid _lowerCamelCase : Union[str, Any] = 1 _lowerCamelCase : str = [ [0 for col in range(len(grid[0] ) )] for row in range(len(__snake_case ) ) ] # the action grid _lowerCamelCase : int = init[0] _lowerCamelCase : Union[str, Any] = init[1] _lowerCamelCase : Dict = 0 _lowerCamelCase : List[Any] = g + heuristic[x][y] # cost from starting cell to destination cell _lowerCamelCase : int = [[f, g, x, y]] _lowerCamelCase : Dict = False # flag that is set when search is complete _lowerCamelCase : List[str] = False # flag set if we can't find expand while not found and not resign: if len(__snake_case ) == 0: raise ValueError("""Algorithm is unable to find solution""" ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() _lowerCamelCase : Any = cell.pop() _lowerCamelCase : Any = next_cell[2] _lowerCamelCase : List[str] = next_cell[3] _lowerCamelCase : str = next_cell[1] if x == goal[0] and y == goal[1]: _lowerCamelCase : int = True else: for i in range(len(__snake_case ) ): # to try out different valid actions _lowerCamelCase : int = x + DIRECTIONS[i][0] _lowerCamelCase : Optional[int] = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(__snake_case ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: _lowerCamelCase : str = g + cost _lowerCamelCase : Optional[Any] = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) _lowerCamelCase : str = 1 _lowerCamelCase : Tuple = i _lowerCamelCase : Optional[int] = [] _lowerCamelCase : List[Any] = goal[0] 
_lowerCamelCase : Optional[Any] = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: _lowerCamelCase : Tuple = x - DIRECTIONS[action[x][y]][0] _lowerCamelCase : List[Any] = y - DIRECTIONS[action[x][y]][1] _lowerCamelCase : Union[str, Any] = xa _lowerCamelCase : List[str] = ya invpath.append([x, y] ) _lowerCamelCase : Optional[Any] = [] for i in range(len(__snake_case ) ): path.append(invpath[len(__snake_case ) - 1 - i] ) return path, action if __name__ == "__main__": UpperCAmelCase = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] UpperCAmelCase = [0, 0] # all coordinates are given in format [y,x] UpperCAmelCase = [len(grid) - 1, len(grid[0]) - 1] UpperCAmelCase = 1 # the cost map which pushes the path closer to the goal UpperCAmelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): UpperCAmelCase = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map UpperCAmelCase = 99 UpperCAmelCase , UpperCAmelCase = search(grid, init, goal, cost, heuristic) print("""ACTION MAP""") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
88
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""", # See all Dinat models at https://huggingface.co/models?filter=dinat } class lowercase__ ( A_ ,A_ ): __UpperCAmelCase = '''dinat''' __UpperCAmelCase = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=[3, 4, 6, 5] , SCREAMING_SNAKE_CASE=[2, 4, 8, 16] , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , SCREAMING_SNAKE_CASE=3.0 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: super().__init__(**SCREAMING_SNAKE_CASE) _lowerCamelCase : int = patch_size _lowerCamelCase : Tuple = num_channels _lowerCamelCase : Tuple = embed_dim _lowerCamelCase : str = depths _lowerCamelCase : int = len(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = num_heads _lowerCamelCase : List[Any] = kernel_size _lowerCamelCase : int = dilations _lowerCamelCase : Union[str, Any] = mlp_ratio _lowerCamelCase : Any = qkv_bias _lowerCamelCase : Any = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : Optional[int] = drop_path_rate _lowerCamelCase : str = hidden_act _lowerCamelCase : Union[str, Any] = layer_norm_eps _lowerCamelCase : Optional[int] = initializer_range # we set the 
hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _lowerCamelCase : List[str] = int(embed_dim * 2 ** (len(SCREAMING_SNAKE_CASE) - 1)) _lowerCamelCase : List[str] = layer_scale_init_value _lowerCamelCase : Any = ["""stem"""] + [F'stage{idx}' for idx in range(1 , len(SCREAMING_SNAKE_CASE) + 1)] _lowerCamelCase , _lowerCamelCase : int = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE , out_indices=SCREAMING_SNAKE_CASE , stage_names=self.stage_names)
88
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : __UpperCAmelCase = XGLMConfig __UpperCAmelCase = {} __UpperCAmelCase = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]: _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Union[str, Any] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : int = ffn_dim _lowerCamelCase : str = activation_function _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Tuple = attention_dropout _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = 2 _lowerCamelCase : str = 1 def UpperCamelCase_ ( self) -> 
int: return XGLMConfig.from_pretrained("""facebook/xglm-564M""") def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3) _lowerCamelCase : str = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Tuple = self.get_config() _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False 
def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> Dict: self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self) -> List[Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""") def UpperCamelCase_ ( self) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") tf.random.set_seed(0) _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""") _lowerCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0"""): _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0]) _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : List[Any] = """left""" # use different length sentences to test batching _lowerCamelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = inputs["""input_ids"""] _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12) _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
1
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowercase__ ( A_ ,unittest.TestCase ): __UpperCAmelCase = LEDTokenizer __UpperCAmelCase = LEDTokenizerFast __UpperCAmelCase = True def UpperCamelCase_ ( self) -> int: super().setUp() _lowerCamelCase : Optional[int] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] _lowerCamelCase : List[str] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE)))) _lowerCamelCase : int = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] _lowerCamelCase : Dict = {"""unk_token""": """<unk>"""} _lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""]) _lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""]) with open(self.vocab_file , """w""" , encoding="""utf-8""") as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE) + """\n""") with open(self.merges_file , """w""" , encoding="""utf-8""") as fp: fp.write("""\n""".join(SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> List[Any]: kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> Optional[Any]: kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , 
SCREAMING_SNAKE_CASE) -> Optional[Any]: return "lower newer", "lower newer" @cached_property def UpperCamelCase_ ( self) -> Optional[Any]: return LEDTokenizer.from_pretrained("""allenai/led-base-16384""") @cached_property def UpperCamelCase_ ( self) -> Optional[int]: return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""") @require_torch def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] _lowerCamelCase : List[str] = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : List[str] = tokenizer(SCREAMING_SNAKE_CASE , max_length=len(SCREAMING_SNAKE_CASE) , padding=SCREAMING_SNAKE_CASE , return_tensors="""pt""") self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertEqual((2, 9) , batch.input_ids.shape) self.assertEqual((2, 9) , batch.attention_mask.shape) _lowerCamelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @require_torch def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors="""pt""") self.assertIn("""input_ids""" , SCREAMING_SNAKE_CASE) self.assertIn("""attention_mask""" , SCREAMING_SNAKE_CASE) self.assertNotIn("""labels""" , SCREAMING_SNAKE_CASE) self.assertNotIn("""decoder_attention_mask""" , SCREAMING_SNAKE_CASE) @require_torch def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : Dict = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : int = 
tokenizer(text_target=SCREAMING_SNAKE_CASE , max_length=32 , padding="""max_length""" , return_tensors="""pt""") self.assertEqual(32 , targets["""input_ids"""].shape[1]) @require_torch def UpperCamelCase_ ( self) -> int: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Union[str, Any] = tokenizer( ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors="""pt""") self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertEqual(batch.input_ids.shape , (2, 5122)) @require_torch def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = ["""A long paragraph for summarization."""] _lowerCamelCase : Optional[int] = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""pt""") _lowerCamelCase : Optional[int] = tokenizer(text_target=SCREAMING_SNAKE_CASE , return_tensors="""pt""") _lowerCamelCase : Optional[Any] = inputs["""input_ids"""] _lowerCamelCase : Any = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) @require_torch def UpperCamelCase_ ( self) -> List[Any]: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: _lowerCamelCase : List[Any] = ["""Summary of the text.""", """Another summary."""] _lowerCamelCase : Tuple = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] _lowerCamelCase : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [[0] * len(SCREAMING_SNAKE_CASE) for x in encoded_output["""input_ids"""]] _lowerCamelCase : List[str] = 
tokenizer.pad(SCREAMING_SNAKE_CASE) self.assertSequenceEqual(outputs["""global_attention_mask"""] , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> str: pass def UpperCamelCase_ ( self) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'): _lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : int = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = """A, <mask> AllenNLP sentence.""" _lowerCamelCase : Optional[Any] = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE) self.assertEqual(sum(tokens_r["""token_type_ids"""]) , sum(tokens_p["""token_type_ids"""])) self.assertEqual( sum(tokens_r["""attention_mask"""]) / len(tokens_r["""attention_mask"""]) , sum(tokens_p["""attention_mask"""]) / len(tokens_p["""attention_mask"""]) , ) _lowerCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""]) _lowerCamelCase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""]) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2]) self.assertSequenceEqual( SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""]) self.assertSequenceEqual( SCREAMING_SNAKE_CASE , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""])
88
"""simple docstring""" from collections import defaultdict def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : Tuple = first_str.lower().strip() _lowerCamelCase : int = second_str.lower().strip() # Remove whitespace _lowerCamelCase : Any = first_str.replace(""" """ , """""" ) _lowerCamelCase : List[str] = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(__snake_case ) != len(__snake_case ): return False # Default values for count should be 0 _lowerCamelCase : defaultdict[str, int] = defaultdict(__snake_case ) # For each character in input strings, # increment count in the corresponding for i in range(len(__snake_case ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = input("""Enter the first string """).strip() UpperCAmelCase = input("""Enter the second string """).strip() UpperCAmelCase = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
88
1
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = 42 class lowercase__ ( A_ ,A_ ): @register_to_config def __init__( self , SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = ("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE = ("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE = (64,) , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = "silu" , SCREAMING_SNAKE_CASE = 3 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 32 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.1_82_15 , SCREAMING_SNAKE_CASE = "group" , ) -> Tuple: super().__init__() # pass init params to Encoder _lowerCamelCase : List[str] = Encoder( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , down_block_types=SCREAMING_SNAKE_CASE , block_out_channels=SCREAMING_SNAKE_CASE , layers_per_block=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , norm_num_groups=SCREAMING_SNAKE_CASE , double_z=SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Tuple = vq_embed_dim if vq_embed_dim is not None else latent_channels _lowerCamelCase : Optional[Any] = nn.Convad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1) _lowerCamelCase : str = VectorQuantizer(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , beta=0.25 , remap=SCREAMING_SNAKE_CASE , sane_index_shape=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = nn.Convad(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1) # pass init params to Decoder _lowerCamelCase : int = Decoder( in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , up_block_types=SCREAMING_SNAKE_CASE , block_out_channels=SCREAMING_SNAKE_CASE , 
layers_per_block=SCREAMING_SNAKE_CASE , act_fn=SCREAMING_SNAKE_CASE , norm_num_groups=SCREAMING_SNAKE_CASE , norm_type=SCREAMING_SNAKE_CASE , ) @apply_forward_hook def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True) -> VQEncoderOutput: _lowerCamelCase : Optional[Any] = self.encoder(SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = self.quant_conv(SCREAMING_SNAKE_CASE) if not return_dict: return (h,) return VQEncoderOutput(latents=SCREAMING_SNAKE_CASE) @apply_forward_hook def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = True) -> Union[DecoderOutput, torch.FloatTensor]: # also go through quantization layer if not force_not_quantize: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.quantize(SCREAMING_SNAKE_CASE) else: _lowerCamelCase : int = h _lowerCamelCase : List[Any] = self.post_quant_conv(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = self.decoder(SCREAMING_SNAKE_CASE , quant if self.config.norm_type == """spatial""" else None) if not return_dict: return (dec,) return DecoderOutput(sample=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = True) -> Union[DecoderOutput, torch.FloatTensor]: _lowerCamelCase : Dict = sample _lowerCamelCase : Any = self.encode(SCREAMING_SNAKE_CASE).latents _lowerCamelCase : Optional[int] = self.decode(SCREAMING_SNAKE_CASE).sample if not return_dict: return (dec,) return DecoderOutput(sample=SCREAMING_SNAKE_CASE)
88
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
1
"""simple docstring""" from __future__ import annotations from collections.abc import Callable from typing import Generic, TypeVar UpperCAmelCase = TypeVar("""T""") UpperCAmelCase = TypeVar("""U""") class lowercase__ ( Generic[T, U] ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Union[str, Any]: _lowerCamelCase : Any = key _lowerCamelCase : Any = val _lowerCamelCase : DoubleLinkedListNode[T, U] | None = None _lowerCamelCase : DoubleLinkedListNode[T, U] | None = None def __repr__( self) -> str: return ( F'Node: key: {self.key}, val: {self.val}, ' F'has next: {bool(self.next)}, has prev: {bool(self.prev)}' ) class lowercase__ ( Generic[T, U] ): def __init__( self) -> None: _lowerCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) _lowerCamelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) _lowerCamelCase , _lowerCamelCase : Optional[int] = self.rear, self.head def __repr__( self) -> str: _lowerCamelCase : Union[str, Any] = ["""DoubleLinkedList"""] _lowerCamelCase : List[Any] = self.head while node.next is not None: rep.append(str(SCREAMING_SNAKE_CASE)) _lowerCamelCase : Optional[Any] = node.next rep.append(str(self.rear)) return ",\n ".join(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : Dict = self.rear.prev # All nodes other than self.head are guaranteed to have non-None previous assert previous is not None _lowerCamelCase : Optional[int] = node _lowerCamelCase : List[str] = previous _lowerCamelCase : Dict = node _lowerCamelCase : str = self.rear def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> DoubleLinkedListNode[T, U] | None: if node.prev is None or node.next is None: return None _lowerCamelCase : Dict = node.next _lowerCamelCase : int = node.prev _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : Any = None return node class lowercase__ ( Generic[T, U] ): 
__UpperCAmelCase = {} def __init__( self , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : DoubleLinkedList[T, U] = DoubleLinkedList() _lowerCamelCase : Optional[int] = capacity _lowerCamelCase : int = 0 _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : dict[T, DoubleLinkedListNode[T, U]] = {} def __repr__( self) -> str: return ( F'CacheInfo(hits={self.hits}, misses={self.miss}, ' F'capacity={self.capacity}, current size={self.num_keys})' ) def __contains__( self , SCREAMING_SNAKE_CASE) -> bool: return key in self.cache def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> U | None: # Note: pythonic interface would throw KeyError rather than return None if key in self.cache: self.hits += 1 _lowerCamelCase : DoubleLinkedListNode[T, U] = self.cache[key] _lowerCamelCase : Dict = self.list.remove(self.cache[key]) assert node == value_node # node is guaranteed not None because it is in self.cache assert node is not None self.list.add(SCREAMING_SNAKE_CASE) return node.val self.miss += 1 return None def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> None: if key not in self.cache: if self.num_keys >= self.capacity: # delete first node (oldest) when over capacity _lowerCamelCase : int = self.list.head.next # guaranteed to have a non-None first node when num_keys > 0 # explain to type checker via assertions assert first_node is not None assert first_node.key is not None assert ( self.list.remove(SCREAMING_SNAKE_CASE) is not None ) # node guaranteed to be in list assert node.key is not None del self.cache[first_node.key] self.num_keys -= 1 _lowerCamelCase : str = DoubleLinkedListNode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.list.add(self.cache[key]) self.num_keys += 1 else: # bump node to the end of the list, update value _lowerCamelCase : List[str] = self.list.remove(self.cache[key]) assert node is not None # node guaranteed to be in list _lowerCamelCase : Union[str, Any] = value 
self.list.add(SCREAMING_SNAKE_CASE) @classmethod def UpperCamelCase_ ( cls , SCREAMING_SNAKE_CASE = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]: def cache_decorator_inner(SCREAMING_SNAKE_CASE) -> Callable[..., U]: def cache_decorator_wrapper(*SCREAMING_SNAKE_CASE) -> U: if func not in cls.decorator_function_to_instance_map: _lowerCamelCase : Union[str, Any] = LRUCache(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = cls.decorator_function_to_instance_map[func].get(args[0]) if result is None: _lowerCamelCase : Any = func(*SCREAMING_SNAKE_CASE) cls.decorator_function_to_instance_map[func].put(args[0] , SCREAMING_SNAKE_CASE) return result def cache_info() -> LRUCache[T, U]: return cls.decorator_function_to_instance_map[func] setattr(SCREAMING_SNAKE_CASE , """cache_info""" , SCREAMING_SNAKE_CASE) # noqa: B010 return cache_decorator_wrapper return cache_decorator_inner if __name__ == "__main__": import doctest doctest.testmod()
88
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
1
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError import requests def _snake_case ( __snake_case : str = "isbn/0140328726" ): """simple docstring""" _lowerCamelCase : Dict = olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes if new_olid.count("""/""" ) != 1: _lowerCamelCase : int = F'{olid} is not a valid Open Library olid' raise ValueError(__snake_case ) return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json() def _snake_case ( __snake_case : dict ): """simple docstring""" _lowerCamelCase : List[str] = { """title""": """Title""", """publish_date""": """Publish date""", """authors""": """Authors""", """number_of_pages""": """Number of pages:""", """first_sentence""": """First sentence""", """isbn_10""": """ISBN (10)""", """isbn_13""": """ISBN (13)""", } _lowerCamelCase : Any = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()} _lowerCamelCase : int = [ get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""] ] _lowerCamelCase : Any = data["""First sentence"""]["""value"""] for key, value in data.items(): if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Tuple = """, """.join(__snake_case ) return data if __name__ == "__main__": import doctest doctest.testmod() while True: UpperCAmelCase = input("""\nEnter the ISBN code to search (or 'quit' to stop): """).strip() if isbn.lower() in ("", "q", "quit", "exit", "stop"): break if len(isbn) not in (10, 13) or not isbn.isdigit(): print(f'''Sorry, {isbn} is not a valid ISBN. 
Please, input a valid ISBN.''') continue print(f'''\nSearching Open Library for ISBN: {isbn}...\n''') try: UpperCAmelCase = summarize_book(get_openlibrary_data(f'''isbn/{isbn}''')) print("""\n""".join(f'''{key}: {value}''' for key, value in book_summary.items())) except JSONDecodeError: # Workaround for requests.exceptions.RequestException: print(f'''Sorry, there are no results for ISBN: {isbn}.''')
88
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
1
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowercase__ ( A_ ): __UpperCAmelCase = ['''image_processor''', '''tokenizer'''] __UpperCAmelCase = '''AutoImageProcessor''' __UpperCAmelCase = '''AutoTokenizer''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: super().__init__(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = self.image_processor def __call__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE) -> str: if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _lowerCamelCase : str = self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) if images is not None: _lowerCamelCase : Dict = self.image_processor(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) if text is not None and images is not None: _lowerCamelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE) , tensor_type=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> List[str]: return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Tuple: return self.tokenizer.decode(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) @property def UpperCamelCase_ ( self) -> int: return ["input_ids", "attention_mask", "pixel_values"]
88
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
1
"""simple docstring""" import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]: _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : Optional[Any] = batch_size _lowerCamelCase : Optional[int] = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Tuple = use_token_type_ids _lowerCamelCase : List[str] = use_labels _lowerCamelCase : Any = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : str = hidden_act _lowerCamelCase : Tuple = hidden_dropout_prob _lowerCamelCase : 
List[str] = attention_probs_dropout_prob _lowerCamelCase : Dict = max_position_embeddings _lowerCamelCase : str = type_vocab_size _lowerCamelCase : Any = type_sequence_label_size _lowerCamelCase : List[str] = initializer_range _lowerCamelCase : Optional[Any] = num_labels _lowerCamelCase : List[str] = num_choices _lowerCamelCase : int = scope def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _lowerCamelCase : Union[str, Any] = None if self.use_input_mask: _lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : int = None if self.use_token_type_ids: _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _lowerCamelCase : Dict = None _lowerCamelCase : Any = None _lowerCamelCase : List[Any] = None if self.use_labels: _lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices) _lowerCamelCase : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase_ ( self) -> Dict: return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : str = NystromformerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]: _lowerCamelCase : Union[str, Any] = NystromformerForMaskedLM(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Any: _lowerCamelCase : List[str] = NystromformerForQuestionAnswering(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Dict = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE 
, SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[Any]: _lowerCamelCase : Union[str, Any] = self.num_labels _lowerCamelCase : int = NystromformerForSequenceClassification(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : Tuple = self.num_labels _lowerCamelCase : int = NystromformerForTokenClassification(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : int = self.num_choices _lowerCamelCase : Any = NystromformerForMultipleChoice(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _lowerCamelCase : List[str] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _lowerCamelCase : Dict = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _lowerCamelCase : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , 
            token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , )
        # Multiple-choice head: one logit per candidate choice.
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def UpperCamelCase_ ( self) -> List[Any]:
        # Repackage the prepared config/inputs as the (config, inputs_dict) pair
        # consumed by the common ModelTesterMixin tests.
        _lowerCamelCase : Tuple = self.prepare_config_and_inputs()
        (
            ( _lowerCamelCase ) ,
            ( _lowerCamelCase ) ,
            ( _lowerCamelCase ) ,
            ( _lowerCamelCase ) ,
            ( _lowerCamelCase ) ,
            ( _lowerCamelCase ) ,
            ( _lowerCamelCase ) ,
        ) : Union[str, Any] = config_and_inputs
        _lowerCamelCase : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
    # All Nystromformer model classes exercised by the common model tests.
    __UpperCAmelCase = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # pipeline task -> model class mapping used by the pipeline tests
    __UpperCAmelCase = (
        {
            '''feature-extraction''': NystromformerModel,
            '''fill-mask''': NystromformerForMaskedLM,
            '''question-answering''': NystromformerForQuestionAnswering,
            '''text-classification''': NystromformerForSequenceClassification,
            '''token-classification''': NystromformerForTokenClassification,
            '''zero-shot''': NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __UpperCAmelCase = False
    __UpperCAmelCase = False

    def UpperCamelCase_ ( self) -> List[str]:
        # Shared fixtures for every test below.
        _lowerCamelCase : Union[str, Any] = NystromformerModelTester(self)
        _lowerCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37)

    def UpperCamelCase_ ( self) -> str:
        self.config_tester.run_common_tests()

    def UpperCamelCase_ ( self) -> Any:
        _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> Tuple:
        # Re-run the base model check for every supported position-embedding type.
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            _lowerCamelCase : int = type
            self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> int:
        _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> Union[str, Any]:
        _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> Optional[int]:
        _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> List[str]:
        _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> str:
        _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE)

    @slow
    def UpperCamelCase_ ( self) -> Optional[int]:
        # Smoke-test loading the first published checkpoint from the archive list.
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _lowerCamelCase : Dict = NystromformerModel.from_pretrained(SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE)


@require_torch
class lowercase__ ( unittest.TestCase ):
    @slow
    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Compare hidden states of the released checkpoint against reference values.
        _lowerCamelCase : Union[str, Any] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""")
        _lowerCamelCase : Union[str, Any] = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)[0]
        _lowerCamelCase : Any = torch.Size((1, 6, 768))
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE)
        _lowerCamelCase : List[str] = torch.tensor(
            [[[-0.45_32, -0.09_36, 0.51_37], [-0.26_76, 0.06_28, 0.61_86], [-0.36_29, -0.17_26, 0.47_16]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))

    @slow
    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Fill-mask sanity check: [MASK] in the prompt should decode to "capital".
        _lowerCamelCase : Optional[Any] = """the [MASK] of Belgium is Brussels"""
        _lowerCamelCase : Optional[Any] = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""")
        _lowerCamelCase : List[Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""")
        _lowerCamelCase : str = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""pt""")
        with torch.no_grad():
            _lowerCamelCase : Tuple = model(encoding.input_ids).logits
        # token index 2 is the [MASK] position in the encoded prompt
        _lowerCamelCase : str = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(SCREAMING_SNAKE_CASE) , """capital""")
88
"""simple docstring"""
# Tests for the Stable Diffusion SAG (Self-Attention Guidance) pipeline.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
    __UpperCAmelCase = StableDiffusionSAGPipeline
    __UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
    __UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    __UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
    __UpperCAmelCase = False

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Build a tiny UNet/scheduler/VAE/CLIP stack so fast tests need no checkpoint.
        torch.manual_seed(0)
        _lowerCamelCase : Dict = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        _lowerCamelCase : int = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE , set_alpha_to_one=SCREAMING_SNAKE_CASE , )
        torch.manual_seed(0)
        _lowerCamelCase : Tuple = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        torch.manual_seed(0)
        _lowerCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        _lowerCamelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""")
        _lowerCamelCase : List[Any] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0) -> List[Any]:
        # mps has no device-bound Generator, so fall back to the global RNG there.
        if str(SCREAMING_SNAKE_CASE).startswith("""mps"""):
            _lowerCamelCase : List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE)
        else:
            _lowerCamelCase : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE).manual_seed(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : List[Any] = {
            """prompt""": """.""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 1.0,
            """sag_scale""": 1.0,
            """output_type""": """numpy""",
        }
        return inputs

    def UpperCamelCase_ ( self) -> Tuple:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
    def UpperCamelCase_ ( self) -> Union[str, Any]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # SD 1.4 + SAG: compare a corner image slice to reference pixel values.
        _lowerCamelCase : Any = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""")
        _lowerCamelCase : Union[str, Any] = sag_pipe.to(SCREAMING_SNAKE_CASE)
        sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Optional[int] = """."""
        _lowerCamelCase : int = torch.manual_seed(0)
        _lowerCamelCase : Tuple = sag_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""")
        _lowerCamelCase : Dict = output.images
        _lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _lowerCamelCase : Optional[Any] = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def UpperCamelCase_ ( self) -> List[str]:
        # SD 2.1-base + SAG: same check against its own reference slice.
        _lowerCamelCase : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        _lowerCamelCase : Dict = sag_pipe.to(SCREAMING_SNAKE_CASE)
        sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Union[str, Any] = """."""
        _lowerCamelCase : List[str] = torch.manual_seed(0)
        _lowerCamelCase : int = sag_pipe(
            [prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""")
        _lowerCamelCase : Any = output.images
        _lowerCamelCase : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        _lowerCamelCase : Any = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def UpperCamelCase_ ( self) -> List[str]:
        # Non-square output (width=768, height=512): only the shape is checked.
        _lowerCamelCase : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""")
        _lowerCamelCase : Optional[Any] = sag_pipe.to(SCREAMING_SNAKE_CASE)
        sag_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Dict = """."""
        _lowerCamelCase : Union[str, Any] = torch.manual_seed(0)
        _lowerCamelCase : Optional[int] = sag_pipe(
            [prompt] , width=768 , height=512 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
        _lowerCamelCase : Union[str, Any] = output.images
        assert image.shape == (1, 512, 768, 3)
1
"""Generate all k-element combinations of 1..n via backtracking."""
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n, in lexicographic order.

    Args:
        n: upper bound of the value range (inclusive).
        k: size of each combination.
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: extend ``current_list`` with values >= ``increment``.

    ``level`` counts how many elements are still missing; when it reaches 0 a
    finished combination is copied into ``total_list``.
    """
    if level == 0:
        total_list.append(current_list[:])  # copy: current_list is mutated below
        return
    # Highest usable start value still leaves level-1 larger values available.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()  # backtrack


def print_all_state(total_list: list[list[int]]) -> None:
    """Print one combination per line, space-separated."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
88
"""simple docstring"""
# Tests for the Mask2Former model: tester fixtures, common model tests, slow integration checks.
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel

if is_vision_available():
    from transformers import MaskaFormerImageProcessor

if is_vision_available():
    from PIL import Image


class lowercase__ :
    # Builds tiny configs and random inputs for the Mask2Former model tests.
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]:
        _lowerCamelCase : List[str] = parent
        _lowerCamelCase : List[Any] = batch_size
        _lowerCamelCase : Tuple = is_training
        _lowerCamelCase : Tuple = use_auxiliary_loss
        _lowerCamelCase : Any = num_queries
        _lowerCamelCase : List[str] = num_channels
        _lowerCamelCase : List[str] = min_size
        _lowerCamelCase : Tuple = max_size
        _lowerCamelCase : str = num_labels
        _lowerCamelCase : Any = hidden_dim
        _lowerCamelCase : Dict = hidden_dim

    def UpperCamelCase_ ( self) -> List[str]:
        # Random pixel values, an all-ones pixel mask, and binary mask/class labels.
        _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            SCREAMING_SNAKE_CASE)
        _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Union[str, Any] = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5
        ).float()
        _lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long()
        _lowerCamelCase : Optional[int] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def UpperCamelCase_ ( self) -> str:
        # Tiny Mask2Former config sized by the tester's dimensions.
        _lowerCamelCase : List[str] = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        _lowerCamelCase : Any = self.num_queries
        _lowerCamelCase : int = self.num_labels
        _lowerCamelCase : int = [1, 1, 1, 1]
        _lowerCamelCase : Any = self.num_channels
        _lowerCamelCase : Optional[Any] = 64
        _lowerCamelCase : str = 128
        _lowerCamelCase : Optional[Any] = self.hidden_dim
        _lowerCamelCase : Any = self.hidden_dim
        _lowerCamelCase : List[Any] = self.hidden_dim
        return config

    def UpperCamelCase_ ( self) -> Any:
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs()
        _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict

    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]:
        # Hidden-state counts must match the configured encoder/pixel-decoder/decoder depths.
        _lowerCamelCase : str = output.encoder_hidden_states
        _lowerCamelCase : int = output.pixel_decoder_hidden_states
        _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths))
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers)

    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]:
        with torch.no_grad():
            _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE)
            model.to(SCREAMING_SNAKE_CASE)
            model.eval()
            _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE)
            _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str:
        _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE)
        model.to(SCREAMING_SNAKE_CASE)
        model.eval()

        def comm_check_on_output(SCREAMING_SNAKE_CASE):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE)
            _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)
            comm_check_on_output(SCREAMING_SNAKE_CASE)
            _lowerCamelCase : Optional[Any] = model(
                pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE)
            comm_check_on_output(SCREAMING_SNAKE_CASE)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape , torch.Size([1]))


@require_torch
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
    __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False
    __UpperCAmelCase = False

    def UpperCamelCase_ ( self) -> Dict:
        _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self)
        _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> List[str]:
        self.config_tester.run_common_tests()

    def UpperCamelCase_ ( self) -> int:
        _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> Tuple:
        _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE)

    @unittest.skip(reason="""Mask2Former does not use inputs_embeds""")
    def UpperCamelCase_ ( self) -> Optional[int]:
        pass

    @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""")
    def UpperCamelCase_ ( self) -> Tuple:
        pass

    @unittest.skip(reason="""Mask2Former is not a generative model""")
    def UpperCamelCase_ ( self) -> List[Any]:
        pass

    @unittest.skip(reason="""Mask2Former does not use token embeddings""")
    def UpperCamelCase_ ( self) -> Any:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""")
    def UpperCamelCase_ ( self) -> Dict:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def UpperCamelCase_ ( self) -> Optional[int]:
        pass

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # forward() must take pixel_values as its first (non-self) argument.
        _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE)
            _lowerCamelCase : Any = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _lowerCamelCase : str = [*signature.parameters.keys()]
            _lowerCamelCase : int = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE)

    @slow
    def UpperCamelCase_ ( self) -> Optional[int]:
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Forward pass with labels must produce a loss.
        _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2
        _lowerCamelCase : str = {
            """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE),
            """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE),
            """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(),
        }
        _lowerCamelCase : List[str] = self.model_tester.get_config()
        _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE)
        self.assertTrue(outputs.loss is not None)

    def UpperCamelCase_ ( self) -> Tuple:
        _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)

    def UpperCamelCase_ ( self) -> Optional[int]:
        # Attention outputs must be returned when requested.
        _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
            _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE)
            self.assertTrue(outputs.attentions is not None)

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Training smoke test: the loss must backpropagate.
        if not self.model_tester.is_training:
            return
        _lowerCamelCase : Any = self.all_model_classes[1]
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE)
        model.to(SCREAMING_SNAKE_CASE)
        model.train()
        _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss
        loss.backward()

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Gradient-retention test: all intermediate outputs must receive a gradient.
        _lowerCamelCase : Any = self.all_model_classes[1]
        _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        _lowerCamelCase : int = True
        _lowerCamelCase : Optional[Any] = True
        _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
        model.train()
        _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        _lowerCamelCase : Optional[int] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


# absolute tolerance for the integration checks below
UpperCAmelCase = 1e-4


def _snake_case ( ):
    """simple docstring"""
    # Load the standard COCO cats fixture image.
    _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image


@require_vision
@slow
class lowercase__ ( unittest.TestCase ):
    @cached_property
    def UpperCamelCase_ ( self) -> int:
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def UpperCamelCase_ ( self) -> Union[str, Any]:
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def UpperCamelCase_ ( self) -> Optional[Any]:
        # Headless model: compare encoder/pixel-decoder/decoder hidden states to references.
        _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : str = self.default_image_processor
        _lowerCamelCase : List[str] = prepare_img()
        _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384))
        with torch.no_grad():
            _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE)
        _lowerCamelCase : List[Any] = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
        _lowerCamelCase : Any = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
        _lowerCamelCase : Dict = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))

    def UpperCamelCase_ ( self) -> Any:
        # Segmentation head: check mask and class logits against reference values.
        _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval()
        _lowerCamelCase : Optional[Any] = self.default_image_processor
        _lowerCamelCase : Any = prepare_img()
        _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384))
        with torch.no_grad():
            _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE)
        # masks_queries_logits
        _lowerCamelCase : str = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        _lowerCamelCase : Any = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
        # class_queries_logits
        _lowerCamelCase : List[str] = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
        _lowerCamelCase : Optional[Any] = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ]).to(SCREAMING_SNAKE_CASE)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))

    def UpperCamelCase_ ( self) -> int:
        # End-to-end with segmentation maps: the loss must be computed.
        _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval()
        _lowerCamelCase : str = self.default_image_processor
        _lowerCamelCase : Tuple = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , )
        _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE)
        _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]]
        _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE)
        self.assertTrue(outputs.loss is not None)
88
1
"""Find a Hamiltonian cycle in a graph (adjacency matrix) via backtracking."""


def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    """Return True if ``next_ver`` can extend ``path`` at position ``curr_ind``."""
    # 1. Validate that the previous path vertex is adjacent to next_ver
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Recursive backtracking step; fills ``path`` in place and reports success."""
    if curr_ind == len(graph):
        # Base case: all vertices placed; the cycle closes iff the last vertex
        # connects back to the starting one.
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step: try every vertex as the next stop.
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle as a vertex list (start vertex repeated at the
    end), or an empty list if the graph contains none.

    Args:
        graph: adjacency matrix (1 = edge, 0 = no edge).
        start_index: vertex the cycle starts and ends at.
    """
    # len(graph) + 1 slots: every vertex once, plus the return to the start.
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path, otherwise return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
88
"""simple docstring"""
# Flax auto-model registry: one OrderedDict of model_type -> Flax class name per task head,
# turned into lazy mappings and FlaxAutoModel* classes below.
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


UpperCAmelCase = logging.get_logger(__name__)

UpperCAmelCase = OrderedDict(
    [
        # Base model mapping
        ("""albert""", """FlaxAlbertModel"""),
        ("""bart""", """FlaxBartModel"""),
        ("""beit""", """FlaxBeitModel"""),
        ("""bert""", """FlaxBertModel"""),
        ("""big_bird""", """FlaxBigBirdModel"""),
        ("""blenderbot""", """FlaxBlenderbotModel"""),
        ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
        ("""clip""", """FlaxCLIPModel"""),
        ("""distilbert""", """FlaxDistilBertModel"""),
        ("""electra""", """FlaxElectraModel"""),
        ("""gpt-sw3""", """FlaxGPT2Model"""),
        ("""gpt2""", """FlaxGPT2Model"""),
        ("""gpt_neo""", """FlaxGPTNeoModel"""),
        ("""gptj""", """FlaxGPTJModel"""),
        ("""longt5""", """FlaxLongT5Model"""),
        ("""marian""", """FlaxMarianModel"""),
        ("""mbart""", """FlaxMBartModel"""),
        ("""mt5""", """FlaxMT5Model"""),
        ("""opt""", """FlaxOPTModel"""),
        ("""pegasus""", """FlaxPegasusModel"""),
        ("""regnet""", """FlaxRegNetModel"""),
        ("""resnet""", """FlaxResNetModel"""),
        ("""roberta""", """FlaxRobertaModel"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
        ("""roformer""", """FlaxRoFormerModel"""),
        ("""t5""", """FlaxT5Model"""),
        ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
        ("""vit""", """FlaxViTModel"""),
        ("""wav2vec2""", """FlaxWav2Vec2Model"""),
        ("""whisper""", """FlaxWhisperModel"""),
        ("""xglm""", """FlaxXGLMModel"""),
        ("""xlm-roberta""", """FlaxXLMRobertaModel"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for pre-training mapping
        ("""albert""", """FlaxAlbertForPreTraining"""),
        ("""bart""", """FlaxBartForConditionalGeneration"""),
        ("""bert""", """FlaxBertForPreTraining"""),
        ("""big_bird""", """FlaxBigBirdForPreTraining"""),
        ("""electra""", """FlaxElectraForPreTraining"""),
        ("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
        ("""mbart""", """FlaxMBartForConditionalGeneration"""),
        ("""mt5""", """FlaxMT5ForConditionalGeneration"""),
        ("""roberta""", """FlaxRobertaForMaskedLM"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
        ("""roformer""", """FlaxRoFormerForMaskedLM"""),
        ("""t5""", """FlaxT5ForConditionalGeneration"""),
        ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
        ("""whisper""", """FlaxWhisperForConditionalGeneration"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Masked LM mapping
        ("""albert""", """FlaxAlbertForMaskedLM"""),
        ("""bart""", """FlaxBartForConditionalGeneration"""),
        ("""bert""", """FlaxBertForMaskedLM"""),
        ("""big_bird""", """FlaxBigBirdForMaskedLM"""),
        ("""distilbert""", """FlaxDistilBertForMaskedLM"""),
        ("""electra""", """FlaxElectraForMaskedLM"""),
        ("""mbart""", """FlaxMBartForConditionalGeneration"""),
        ("""roberta""", """FlaxRobertaForMaskedLM"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
        ("""roformer""", """FlaxRoFormerForMaskedLM"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("""bart""", """FlaxBartForConditionalGeneration"""),
        ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
        ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
        ("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
        ("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
        ("""marian""", """FlaxMarianMTModel"""),
        ("""mbart""", """FlaxMBartForConditionalGeneration"""),
        ("""mt5""", """FlaxMT5ForConditionalGeneration"""),
        ("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
        ("""t5""", """FlaxT5ForConditionalGeneration"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Image-classsification
        ("""beit""", """FlaxBeitForImageClassification"""),
        ("""regnet""", """FlaxRegNetForImageClassification"""),
        ("""resnet""", """FlaxResNetForImageClassification"""),
        ("""vit""", """FlaxViTForImageClassification"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Causal LM mapping
        ("""bart""", """FlaxBartForCausalLM"""),
        ("""bert""", """FlaxBertForCausalLM"""),
        ("""big_bird""", """FlaxBigBirdForCausalLM"""),
        ("""electra""", """FlaxElectraForCausalLM"""),
        ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
        ("""gpt2""", """FlaxGPT2LMHeadModel"""),
        ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
        ("""gptj""", """FlaxGPTJForCausalLM"""),
        ("""opt""", """FlaxOPTForCausalLM"""),
        ("""roberta""", """FlaxRobertaForCausalLM"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
        ("""xglm""", """FlaxXGLMForCausalLM"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("""albert""", """FlaxAlbertForSequenceClassification"""),
        ("""bart""", """FlaxBartForSequenceClassification"""),
        ("""bert""", """FlaxBertForSequenceClassification"""),
        ("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
        ("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
        ("""electra""", """FlaxElectraForSequenceClassification"""),
        ("""mbart""", """FlaxMBartForSequenceClassification"""),
        ("""roberta""", """FlaxRobertaForSequenceClassification"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
        ("""roformer""", """FlaxRoFormerForSequenceClassification"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Question Answering mapping
        ("""albert""", """FlaxAlbertForQuestionAnswering"""),
        ("""bart""", """FlaxBartForQuestionAnswering"""),
        ("""bert""", """FlaxBertForQuestionAnswering"""),
        ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
        ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
        ("""electra""", """FlaxElectraForQuestionAnswering"""),
        ("""mbart""", """FlaxMBartForQuestionAnswering"""),
        ("""roberta""", """FlaxRobertaForQuestionAnswering"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
        ("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Token Classification mapping
        ("""albert""", """FlaxAlbertForTokenClassification"""),
        ("""bert""", """FlaxBertForTokenClassification"""),
        ("""big_bird""", """FlaxBigBirdForTokenClassification"""),
        ("""distilbert""", """FlaxDistilBertForTokenClassification"""),
        ("""electra""", """FlaxElectraForTokenClassification"""),
        ("""roberta""", """FlaxRobertaForTokenClassification"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
        ("""roformer""", """FlaxRoFormerForTokenClassification"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("""albert""", """FlaxAlbertForMultipleChoice"""),
        ("""bert""", """FlaxBertForMultipleChoice"""),
        ("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
        ("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
        ("""electra""", """FlaxElectraForMultipleChoice"""),
        ("""roberta""", """FlaxRobertaForMultipleChoice"""),
        ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
        ("""roformer""", """FlaxRoFormerForMultipleChoice"""),
        ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        ("""bert""", """FlaxBertForNextSentencePrediction"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
        ("""whisper""", """FlaxWhisperForConditionalGeneration"""),
    ]
)

UpperCAmelCase = OrderedDict(
    [
        ("""whisper""", """FlaxWhisperForAudioClassification"""),
    ]
)

UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCAmelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_MAPPING


UpperCAmelCase = auto_class_update(FlaxAutoModel)


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING


UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING


UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


UpperCAmelCase = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


UpperCAmelCase = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


UpperCAmelCase = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


UpperCAmelCase = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


UpperCAmelCase = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="""image classification"""
)


class lowercase__ ( _BaseAutoModelClass ):
    __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")


class lowercase__ ( _BaseAutoModelClass ):
    # NOTE(review): excerpt ends mid-assignment here; the mapping value continues beyond this chunk.
    __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
1
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
1
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ ("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""), ("""beit""", """BeitFeatureExtractor"""), ("""chinese_clip""", """ChineseCLIPFeatureExtractor"""), ("""clap""", """ClapFeatureExtractor"""), ("""clip""", """CLIPFeatureExtractor"""), ("""clipseg""", """ViTFeatureExtractor"""), ("""conditional_detr""", """ConditionalDetrFeatureExtractor"""), ("""convnext""", """ConvNextFeatureExtractor"""), ("""cvt""", """ConvNextFeatureExtractor"""), ("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""), ("""data2vec-vision""", """BeitFeatureExtractor"""), ("""deformable_detr""", """DeformableDetrFeatureExtractor"""), ("""deit""", """DeiTFeatureExtractor"""), ("""detr""", """DetrFeatureExtractor"""), ("""dinat""", """ViTFeatureExtractor"""), ("""donut-swin""", """DonutFeatureExtractor"""), ("""dpt""", """DPTFeatureExtractor"""), ("""encodec""", """EncodecFeatureExtractor"""), ("""flava""", """FlavaFeatureExtractor"""), ("""glpn""", """GLPNFeatureExtractor"""), ("""groupvit""", """CLIPFeatureExtractor"""), ("""hubert""", """Wav2Vec2FeatureExtractor"""), ("""imagegpt""", """ImageGPTFeatureExtractor"""), ("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""), ("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""), ("""levit""", """LevitFeatureExtractor"""), 
("""maskformer""", """MaskFormerFeatureExtractor"""), ("""mctct""", """MCTCTFeatureExtractor"""), ("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""), ("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""), ("""mobilevit""", """MobileViTFeatureExtractor"""), ("""nat""", """ViTFeatureExtractor"""), ("""owlvit""", """OwlViTFeatureExtractor"""), ("""perceiver""", """PerceiverFeatureExtractor"""), ("""poolformer""", """PoolFormerFeatureExtractor"""), ("""regnet""", """ConvNextFeatureExtractor"""), ("""resnet""", """ConvNextFeatureExtractor"""), ("""segformer""", """SegformerFeatureExtractor"""), ("""sew""", """Wav2Vec2FeatureExtractor"""), ("""sew-d""", """Wav2Vec2FeatureExtractor"""), ("""speech_to_text""", """Speech2TextFeatureExtractor"""), ("""speecht5""", """SpeechT5FeatureExtractor"""), ("""swiftformer""", """ViTFeatureExtractor"""), ("""swin""", """ViTFeatureExtractor"""), ("""swinv2""", """ViTFeatureExtractor"""), ("""table-transformer""", """DetrFeatureExtractor"""), ("""timesformer""", """VideoMAEFeatureExtractor"""), ("""tvlt""", """TvltFeatureExtractor"""), ("""unispeech""", """Wav2Vec2FeatureExtractor"""), ("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""), ("""van""", """ConvNextFeatureExtractor"""), ("""videomae""", """VideoMAEFeatureExtractor"""), ("""vilt""", """ViltFeatureExtractor"""), ("""vit""", """ViTFeatureExtractor"""), ("""vit_mae""", """ViTFeatureExtractor"""), ("""vit_msn""", """ViTFeatureExtractor"""), ("""wav2vec2""", """Wav2Vec2FeatureExtractor"""), ("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""), ("""wavlm""", """Wav2Vec2FeatureExtractor"""), ("""whisper""", """WhisperFeatureExtractor"""), ("""xclip""", """CLIPFeatureExtractor"""), ("""yolos""", """YolosFeatureExtractor"""), ] ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def _snake_case ( __snake_case : str ): """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in 
extractors: _lowerCamelCase : str = model_type_to_module_name(__snake_case ) _lowerCamelCase : str = importlib.import_module(F'.{module_name}' , """transformers.models""" ) try: return getattr(__snake_case , __snake_case ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(__snake_case , """__name__""" , __snake_case ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _lowerCamelCase : List[str] = importlib.import_module("""transformers""" ) if hasattr(__snake_case , __snake_case ): return getattr(__snake_case , __snake_case ) return None def _snake_case ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Tuple , ): """simple docstring""" _lowerCamelCase : Optional[Any] = get_file_from_repo( __snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , ) if resolved_config_file is None: logger.info( """Could not locate the feature extractor configuration file, will try to use the model config instead.""" ) return {} with open(__snake_case , encoding="""utf-8""" ) as reader: return json.load(__snake_case ) class lowercase__ : def __init__( self) -> Union[str, Any]: raise EnvironmentError( """AutoFeatureExtractor is designed to be instantiated """ """using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.""") @classmethod 
@replace_list_option_in_docstrings(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Dict: _lowerCamelCase : List[Any] = kwargs.pop("""config""" , SCREAMING_SNAKE_CASE) _lowerCamelCase : str = kwargs.pop("""trust_remote_code""" , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = True _lowerCamelCase , _lowerCamelCase : Union[str, Any] = FeatureExtractionMixin.get_feature_extractor_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = config_dict.get("""feature_extractor_type""" , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = None if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {}): _lowerCamelCase : List[str] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): _lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) # It could be in `config.feature_extractor_type`` _lowerCamelCase : Tuple = getattr(SCREAMING_SNAKE_CASE , """feature_extractor_type""" , SCREAMING_SNAKE_CASE) if hasattr(SCREAMING_SNAKE_CASE , """auto_map""") and "AutoFeatureExtractor" in config.auto_map: _lowerCamelCase : Optional[Any] = config.auto_map["""AutoFeatureExtractor"""] if feature_extractor_class is not None: _lowerCamelCase : Dict = feature_extractor_class_from_name(SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = feature_extractor_auto_map is not None _lowerCamelCase : Optional[Any] = feature_extractor_class is not None or type(SCREAMING_SNAKE_CASE) in FEATURE_EXTRACTOR_MAPPING _lowerCamelCase : str = resolve_trust_remote_code( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) if has_remote_code and trust_remote_code: _lowerCamelCase : List[str] = 
get_class_from_dynamic_module( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = kwargs.pop("""code_revision""" , SCREAMING_SNAKE_CASE) if os.path.isdir(SCREAMING_SNAKE_CASE): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(SCREAMING_SNAKE_CASE) in FEATURE_EXTRACTOR_MAPPING: _lowerCamelCase : Union[str, Any] = FEATURE_EXTRACTOR_MAPPING[type(SCREAMING_SNAKE_CASE)] return feature_extractor_class.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) raise ValueError( F'Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ' F'`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ' F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}') @staticmethod def UpperCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[Any]: FEATURE_EXTRACTOR_MAPPING.register(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
88
"""simple docstring""" def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[int] ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : int ): """simple docstring""" if curr_ind == len(__snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__snake_case ) ): if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ): # Insert current vertex into path as next transition _lowerCamelCase : List[str] = next_ver # Validate created path if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ): return True # Backtrack _lowerCamelCase : Tuple = -1 return False def _snake_case ( __snake_case : list[list[int]] , __snake_case : int = 0 ): """simple docstring""" _lowerCamelCase : Any = [-1] * (len(__snake_case ) + 1) # initialize start and end of path with starting index _lowerCamelCase : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__snake_case , __snake_case , 1 ) else []
88
1
"""simple docstring""" import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = """▁""" UpperCAmelCase = { """vocab_file""": """vocab.json""", """spm_file""": """sentencepiece.bpe.model""", """tokenizer_config_file""": """tokenizer_config.json""", } UpperCAmelCase = { """vocab_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""", }, """spm_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_config_file""": { """facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""", """facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""", }, } UpperCAmelCase = { """facebook/m2m100_418M""": 1024, } # fmt: off UpperCAmelCase = { """m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", 
"""ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""], """wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""] } class lowercase__ ( A_ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = ['''input_ids''', '''attention_mask'''] __UpperCAmelCase = [] __UpperCAmelCase = [] def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="m2m100" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE=8 , **SCREAMING_SNAKE_CASE , ) -> None: _lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs _lowerCamelCase : List[Any] = language_codes _lowerCamelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes] _lowerCamelCase : str = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} _lowerCamelCase : Optional[Any] = kwargs.get("""additional_special_tokens""" , []) kwargs["additional_special_tokens"] += [ self.get_lang_token(SCREAMING_SNAKE_CASE) for lang_code in fairseq_language_code if self.get_lang_token(SCREAMING_SNAKE_CASE) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , 
language_codes=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Union[str, Any] = vocab_file _lowerCamelCase : str = load_json(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = {v: k for k, v in self.encoder.items()} _lowerCamelCase : Optional[int] = spm_file _lowerCamelCase : Union[str, Any] = load_spm(SCREAMING_SNAKE_CASE , self.sp_model_kwargs) _lowerCamelCase : Optional[int] = len(self.encoder) _lowerCamelCase : Dict = { self.get_lang_token(SCREAMING_SNAKE_CASE): self.encoder_size + i for i, lang_code in enumerate(SCREAMING_SNAKE_CASE) } _lowerCamelCase : List[Any] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(SCREAMING_SNAKE_CASE)} _lowerCamelCase : str = {v: k for k, v in self.lang_token_to_id.items()} _lowerCamelCase : List[str] = src_lang if src_lang is not None else """en""" _lowerCamelCase : List[str] = tgt_lang _lowerCamelCase : List[str] = self.get_lang_id(self._src_lang) self.set_src_lang_special_tokens(self._src_lang) _lowerCamelCase : Dict = num_madeup_words @property def UpperCamelCase_ ( self) -> int: return len(self.encoder) + len(self.lang_token_to_id) @property def UpperCamelCase_ ( self) -> str: return self._src_lang @src_lang.setter def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder[self.unk_token]) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> str: if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token) def 
UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Dict: _lowerCamelCase : Tuple = [] _lowerCamelCase : Optional[int] = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) + token _lowerCamelCase : Tuple = [] else: current_sub_tokens.append(SCREAMING_SNAKE_CASE) out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE) return out_string.strip() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = [1] * len(self.prefix_tokens) _lowerCamelCase : Dict = [1] * len(self.suffix_tokens) if token_ids_a is None: return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE)) + suffix_ones return prefix_ones + ([0] * len(SCREAMING_SNAKE_CASE)) + ([0] * len(SCREAMING_SNAKE_CASE)) + suffix_ones def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Dict: _lowerCamelCase : str = self.__dict__.copy() _lowerCamelCase : List[Any] = None return state def __setstate__( self , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : Any = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _lowerCamelCase : int = {} _lowerCamelCase : Any = 
load_spm(self.spm_file , self.sp_model_kwargs) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]: _lowerCamelCase : Optional[int] = Path(SCREAMING_SNAKE_CASE) if not save_dir.is_dir(): raise OSError(F'{save_directory} should be a directory') _lowerCamelCase : Any = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""] ) _lowerCamelCase : Optional[Any] = save_dir / ( (filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""] ) save_json(self.encoder , SCREAMING_SNAKE_CASE) if os.path.abspath(self.spm_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.spm_file): copyfile(self.spm_file , SCREAMING_SNAKE_CASE) elif not os.path.isfile(self.spm_file): with open(SCREAMING_SNAKE_CASE , """wb""") as fi: _lowerCamelCase : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(SCREAMING_SNAKE_CASE) return (str(SCREAMING_SNAKE_CASE), str(SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "en" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "ro" , **SCREAMING_SNAKE_CASE , ) -> BatchEncoding: _lowerCamelCase : List[Any] = src_lang _lowerCamelCase : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self.src_lang) return super().prepare_seqaseq_batch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> List[str]: if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""") _lowerCamelCase : List[str] = src_lang _lowerCamelCase : str = self(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = self.get_lang_id(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = tgt_lang_id 
return inputs def UpperCamelCase_ ( self) -> Any: self.set_src_lang_special_tokens(self.src_lang) def UpperCamelCase_ ( self) -> Tuple: self.set_tgt_lang_special_tokens(self.tgt_lang) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : List[Any] = self.get_lang_token(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = self.lang_token_to_id[lang_token] _lowerCamelCase : str = [self.cur_lang_id] _lowerCamelCase : int = [self.eos_token_id] def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> None: _lowerCamelCase : int = self.get_lang_token(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = self.lang_token_to_id[lang_token] _lowerCamelCase : List[Any] = [self.cur_lang_id] _lowerCamelCase : Tuple = [self.eos_token_id] def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> str: return self.lang_code_to_token[lang] def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = self.get_lang_token(SCREAMING_SNAKE_CASE) return self.lang_token_to_id[lang_token] def _snake_case ( __snake_case : str , __snake_case : Dict[str, Any] ): """simple docstring""" _lowerCamelCase : List[str] = sentencepiece.SentencePieceProcessor(**__snake_case ) spm.Load(str(__snake_case ) ) return spm def _snake_case ( __snake_case : str ): """simple docstring""" with open(__snake_case , """r""" ) as f: return json.load(__snake_case ) def _snake_case ( __snake_case : List[str] , __snake_case : str ): """simple docstring""" with open(__snake_case , """w""" ) as f: json.dump(__snake_case , __snake_case , indent=2 )
88
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""Lazy import structure for the FNet model (config, tokenizers, PyTorch modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# NOTE: this dict must be built incrementally — assigning every piece to one
# name (as the broken version did) clobbers earlier entries and leaves
# `_import_structure` undefined at the _LazyModule call.
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

# Each optional backend contributes its symbols only when the backend is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy proxy is used.
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
88
"""Lazy import structure for the VisionEncoderDecoder model (PyTorch/TF/Flax backends)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Maps submodule name -> exported public names; consumed by _LazyModule below.
# The broken version assigned every piece to a single name, so each backend
# entry clobbered the previous one and `_import_structure` was never defined.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy proxy is used.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
88
1
"""Report how many CUDA GPUs PyTorch can see on this machine."""
import torch


def main():
    """Print the number of visible CUDA devices (0 when CUDA is unavailable)."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    # The broken version called main() while only defining `_snake_case`,
    # and never bound `num_gpus` before printing it.
    main()
88
"""Small training utilities: parameter freezing, device selection, image display, timestamps."""
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    """Disable gradient tracking for every parameter of *module* (in place)."""
    # The broken version assigned False to a throwaway name instead of
    # `param.requires_grad`, and all four helpers shared one colliding name.
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Return the preferred device string: "cuda" > "mps" > "cpu".

    MPS is detected but a warning is printed because backprop is unreliable there.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display *image* with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current wall-clock time formatted as "HH:MM:SS"."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
88
1
"""Lazy import structure for the VisionEncoderDecoder model (PyTorch/TF/Flax backends)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Maps submodule name -> exported public names; consumed by _LazyModule below.
# The broken version assigned every piece to a single name, so each backend
# entry clobbered the previous one and `_import_structure` was never defined.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy proxy is used.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
88
#!/usr/bin/env python
"""Pre-train ViT-MAE (masked auto-encoding) on an image dataset with the HF Trainer.

Reconstructed from the obfuscated original: every local had been renamed to a
throwaway (`_lowerCamelCase`) while its uses kept the real name, so `parser`,
`ds`, `config`, `trainer`, etc. were all undefined. Real names are restored.
"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    ViTImageProcessor,
    ViTMAEConfig,
    ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@dataclass
class DataTrainingArguments:
    """Arguments controlling which dataset is used and how it is split."""

    dataset_name: str = field(default="cifar10", metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={"help": "The column name of the images in the files."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        # Build the `data_files` mapping expected by `datasets.load_dataset`.
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["val"] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    """Arguments for which model/config/image processor to pre-train."""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={"help": "The ratio of the number of masked tokens in the input sequence."}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={"help": "Whether or not to train with normalized pixel values as target."}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    """TrainingArguments plus a base learning rate that is scaled by batch size."""

    base_learning_rate: float = field(
        default=1e-3, metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."}
    )


def collate_fn(examples):
    """Stack per-example pixel tensors into a single batch tensor."""
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


def main():
    # See all possible arguments by passing --help, or via a single JSON file.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process just runs main().
    main()


if __name__ == "__main__":
    main()
88
1
"""Deprecated feature-extractor alias for Deformable DETR (use the image processor instead)."""
import warnings

from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor

logger = logging.get_logger(__name__)


class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    # Thin deprecation shim: behaves exactly like DeformableDetrImageProcessor
    # but warns on construction so callers migrate before v5.
    def __init__(self, *args, **kwargs) -> None:
        # The broken version declared `*SCREAMING_SNAKE_CASE, **SCREAMING_SNAKE_CASE`
        # (a SyntaxError: duplicate parameter name) and passed the args tuple where
        # the warning category belongs.
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
88
"""Sigmoid and SiLU (sigmoid-weighted linear unit) activations on NumPy arrays."""
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """Element-wise SiLU (swish): x * sigmoid(x).

    The broken version named both functions `_snake_case` (the second shadowed
    the first) and then called an undefined `sigmoid`.
    """
    return vector * sigmoid(vector)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
88
1
"""Funnel Transformer model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    """Configuration for the Funnel Transformer.

    The broken version declared every __init__ parameter as the same name
    (a SyntaxError) while the body read the real names; real parameter names
    are restored below. Validation asserts were converted to ValueError so
    they survive `python -O`.
    """

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],  # only read, never mutated, so the shared default is safe
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        # One repeat per block unless the caller specifies otherwise.
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        if len(block_sizes) != len(self.block_repeats):
            raise ValueError("`block_sizes` and `block_repeats` should have the same length.")
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        if pooling_type not in ["mean", "max"]:
            raise ValueError(f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported.")
        self.pooling_type = pooling_type
        if attention_type not in ["relative_shift", "factorized"]:
            raise ValueError(
                f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
            )
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        # Derived: total encoder layers across all blocks.
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        # Derived: number of funnel blocks.
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
88
"""CLI entry point for running TensorFlow benchmarks with deprecated-flag detection."""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments


def main():
    """Parse benchmark arguments, translating the legacy `--no_*` flags into a helpful error."""
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        # HfArgumentParser reports unknown flags in its message; split out the
        # list of offending args and map old-style `--no_x` to the new `--no-x`.
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval() on a slice of the exception message is fragile and
        # unsafe in general; kept for behavior parity with the original script.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
88
1
"""simple docstring""" from __future__ import annotations from dataclasses import dataclass @dataclass class lowercase__ : __UpperCAmelCase = 42 __UpperCAmelCase = None __UpperCAmelCase = None def _snake_case ( __snake_case : TreeNode | None ): """simple docstring""" def is_valid_tree(__snake_case : TreeNode | None ) -> bool: if node is None: return True if not isinstance(__snake_case , __snake_case ): return False try: float(node.data ) except (TypeError, ValueError): return False return is_valid_tree(node.left ) and is_valid_tree(node.right ) if not is_valid_tree(__snake_case ): raise ValueError( """Each node should be type of TreeNode and data should be float.""" ) def is_binary_search_tree_recursive_check( __snake_case : TreeNode | None , __snake_case : float , __snake_case : float ) -> bool: if node is None: return True return ( left_bound < node.data < right_bound and is_binary_search_tree_recursive_check(node.left , __snake_case , node.data ) and is_binary_search_tree_recursive_check( node.right , node.data , __snake_case ) ) return is_binary_search_tree_recursive_check(__snake_case , -float("""inf""" ) , float("""inf""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
88
"""I-BERT (integer-only BERT) model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": (
        "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
    ),
}


class IBertConfig(PretrainedConfig):
    """Configuration for I-BERT.

    The broken version declared every __init__ parameter under the same name
    (a SyntaxError) while the body read the real names; restored below.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Integer-only quantization switches specific to I-BERT.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
88
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" if "resnet-50" in model_name: _lowerCamelCase : Optional[Any] = ResNetConfig.from_pretrained("""microsoft/resnet-50""" ) elif "resnet-101" in model_name: _lowerCamelCase : Any = ResNetConfig.from_pretrained("""microsoft/resnet-101""" ) else: raise ValueError("""Model name should include either resnet50 or resnet101""" ) _lowerCamelCase : Union[str, Any] = DetrConfig(use_timm_backbone=__snake_case , backbone_config=__snake_case ) # set label attributes _lowerCamelCase : List[Any] = """panoptic""" in model_name if is_panoptic: _lowerCamelCase : str = 250 else: _lowerCamelCase : str = 91 _lowerCamelCase : int = """huggingface/label-files""" _lowerCamelCase : int = """coco-detection-id2label.json""" _lowerCamelCase : Tuple = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Optional[int] = {int(__snake_case ): v for k, v in idalabel.items()} _lowerCamelCase : int = idalabel _lowerCamelCase : Dict = {v: k for k, v in idalabel.items()} return config, is_panoptic def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : Any = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") ) rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") ) rename_keys.append(("""backbone.0.body.bn1.bias""", 
"""backbone.conv_encoder.model.embedder.embedder.normalization.bias""") ) rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") ) rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var', ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight', 
F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean', ) ) rename_keys.append( ( F'backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var', F'backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var', ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight', ) ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias') ) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append( (F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', 
F'encoder.layers.{i}.final_layer_norm.bias') ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight', ) ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.multihead_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ("""input_proj.weight""", 
"""input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ] ) return rename_keys def _snake_case ( __snake_case : List[str] , __snake_case : Dict , __snake_case : int ): """simple docstring""" _lowerCamelCase : Optional[Any] = state_dict.pop(__snake_case ) _lowerCamelCase : Any = val def _snake_case ( __snake_case : Optional[int] , __snake_case : Any=False ): """simple docstring""" _lowerCamelCase : str = """""" if is_panoptic: _lowerCamelCase : Dict = """detr.""" # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _lowerCamelCase : int = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' ) _lowerCamelCase : List[Any] = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Dict = in_proj_weight[:256, :] _lowerCamelCase : Optional[Any] = in_proj_bias[:256] _lowerCamelCase : str = in_proj_weight[256:512, :] _lowerCamelCase : List[str] = in_proj_bias[256:512] _lowerCamelCase : Union[str, Any] = in_proj_weight[-256:, :] 
_lowerCamelCase : Union[str, Any] = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _lowerCamelCase : Optional[int] = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' ) _lowerCamelCase : Optional[Any] = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Optional[Any] = in_proj_weight[:256, :] _lowerCamelCase : Optional[int] = in_proj_bias[:256] _lowerCamelCase : Optional[Any] = in_proj_weight[256:512, :] _lowerCamelCase : Any = in_proj_bias[256:512] _lowerCamelCase : Optional[Any] = in_proj_weight[-256:, :] _lowerCamelCase : Any = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention _lowerCamelCase : str = state_dict.pop( F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' ) _lowerCamelCase : str = state_dict.pop(F'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' ) # next, add query, keys and values (in that order) of cross-attention to the state dict _lowerCamelCase : List[Any] = in_proj_weight_cross_attn[:256, :] _lowerCamelCase : int = in_proj_bias_cross_attn[:256] _lowerCamelCase : int = in_proj_weight_cross_attn[256:512, :] _lowerCamelCase : str = in_proj_bias_cross_attn[256:512] _lowerCamelCase : Dict = in_proj_weight_cross_attn[-256:, :] _lowerCamelCase : Any = in_proj_bias_cross_attn[-256:] def _snake_case ( ): """simple docstring""" _lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCamelCase : Union[str, Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def _snake_case ( __snake_case : int , __snake_case : List[str]=None , __snake_case : int=False ): """simple docstring""" 
_lowerCamelCase , _lowerCamelCase : Tuple = get_detr_config(__snake_case ) # load original model from torch hub _lowerCamelCase : int = { """detr-resnet-50""": """detr_resnet50""", """detr-resnet-101""": """detr_resnet101""", } logger.info(F'Converting model {model_name}...' ) _lowerCamelCase : List[str] = torch.hub.load("""facebookresearch/detr""" , model_name_to_original_name[model_name] , pretrained=__snake_case ).eval() _lowerCamelCase : Tuple = detr.state_dict() # rename keys for src, dest in create_rename_keys(__snake_case ): if is_panoptic: _lowerCamelCase : str = """detr.""" + src rename_key(__snake_case , __snake_case , __snake_case ) # query, key and value matrices need special treatment read_in_q_k_v(__snake_case , is_panoptic=__snake_case ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _lowerCamelCase : Optional[Any] = """detr.model.""" if is_panoptic else """model.""" for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("""detr""" ) and not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ) ): _lowerCamelCase : Union[str, Any] = state_dict.pop(__snake_case ) _lowerCamelCase : Dict = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _lowerCamelCase : int = state_dict.pop(__snake_case ) _lowerCamelCase : int = val elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ): continue else: _lowerCamelCase : Any = state_dict.pop(__snake_case ) _lowerCamelCase : int = val else: if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ): _lowerCamelCase : Optional[int] = state_dict.pop(__snake_case ) _lowerCamelCase : str = val # finally, create HuggingFace model and load state dict _lowerCamelCase : Tuple = DetrForSegmentation(__snake_case ) if is_panoptic else DetrForObjectDetection(__snake_case ) model.load_state_dict(__snake_case ) 
model.eval() # verify our conversion on an image _lowerCamelCase : int = """coco_panoptic""" if is_panoptic else """coco_detection""" _lowerCamelCase : List[Any] = DetrImageProcessor(format=__snake_case ) _lowerCamelCase : Optional[int] = processor(images=prepare_img() , return_tensors="""pt""" ) _lowerCamelCase : Union[str, Any] = encoding["""pixel_values"""] _lowerCamelCase : Union[str, Any] = detr(__snake_case ) _lowerCamelCase : Any = model(__snake_case ) assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) model.save_pretrained(__snake_case ) processor.save_pretrained(__snake_case ) if push_to_hub: # Upload model and image processor to the hub logger.info("""Uploading PyTorch model and image processor to the hub...""" ) model.push_to_hub(F'nielsr/{model_name}' ) processor.push_to_hub(F'nielsr/{model_name}' ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""detr-resnet-50""", type=str, choices=["""detr-resnet-50""", """detr-resnet-101"""], help="""Name of the DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the model to the hub or not.""") UpperCAmelCase = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
88
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
1
"""simple docstring""" from __future__ import annotations def _snake_case ( __snake_case : list[int] , __snake_case : int , __snake_case : int , __snake_case : int ): """simple docstring""" if (direction == 1 and array[indexa] > array[indexa]) or ( direction == 0 and array[indexa] < array[indexa] ): _lowerCamelCase , _lowerCamelCase : List[Any] = array[indexa], array[indexa] def _snake_case ( __snake_case : list[int] , __snake_case : int , __snake_case : int , __snake_case : int ): """simple docstring""" if length > 1: _lowerCamelCase : List[str] = int(length / 2 ) for i in range(__snake_case , low + middle ): comp_and_swap(__snake_case , __snake_case , i + middle , __snake_case ) bitonic_merge(__snake_case , __snake_case , __snake_case , __snake_case ) bitonic_merge(__snake_case , low + middle , __snake_case , __snake_case ) def _snake_case ( __snake_case : list[int] , __snake_case : int , __snake_case : int , __snake_case : int ): """simple docstring""" if length > 1: _lowerCamelCase : List[str] = int(length / 2 ) bitonic_sort(__snake_case , __snake_case , __snake_case , 1 ) bitonic_sort(__snake_case , low + middle , __snake_case , 0 ) bitonic_merge(__snake_case , __snake_case , __snake_case , __snake_case ) if __name__ == "__main__": UpperCAmelCase = input("""Enter numbers separated by a comma:\n""").strip() UpperCAmelCase = [int(item.strip()) for item in user_input.split(""",""")] bitonic_sort(unsorted, 0, len(unsorted), 1) print("""\nSorted array in ascending order is: """, end="""""") print(*unsorted, sep=""", """) bitonic_merge(unsorted, 0, len(unsorted), 0) print("""Sorted array in descending order is: """, end="""""") print(*unsorted, sep=""", """)
88
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : __UpperCAmelCase = XGLMConfig __UpperCAmelCase = {} __UpperCAmelCase = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]: _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Union[str, Any] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : int = ffn_dim _lowerCamelCase : str = activation_function _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Tuple = attention_dropout _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = 2 _lowerCamelCase : str = 1 def UpperCamelCase_ ( self) -> 
int: return XGLMConfig.from_pretrained("""facebook/xglm-564M""") def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3) _lowerCamelCase : str = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Tuple = self.get_config() _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False 
def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> Dict: self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self) -> List[Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""") def UpperCamelCase_ ( self) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") tf.random.set_seed(0) _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""") _lowerCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0"""): _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0]) _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : List[Any] = """left""" # use different length sentences to test batching _lowerCamelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = inputs["""input_ids"""] _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12) _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class lowercase__ ( A_ ): __UpperCAmelCase = '''ibert''' def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="none" , **SCREAMING_SNAKE_CASE , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Dict = type_vocab_size _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Dict = layer_norm_eps _lowerCamelCase : List[Any] = position_embedding_type 
_lowerCamelCase : Any = quant_mode _lowerCamelCase : List[str] = force_dequant class lowercase__ ( A_ ): @property def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCamelCase : Optional[int] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
88
"""simple docstring""" from collections import defaultdict def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : Tuple = first_str.lower().strip() _lowerCamelCase : int = second_str.lower().strip() # Remove whitespace _lowerCamelCase : Any = first_str.replace(""" """ , """""" ) _lowerCamelCase : List[str] = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(__snake_case ) != len(__snake_case ): return False # Default values for count should be 0 _lowerCamelCase : defaultdict[str, int] = defaultdict(__snake_case ) # For each character in input strings, # increment count in the corresponding for i in range(len(__snake_case ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = input("""Enter the first string """).strip() UpperCAmelCase = input("""Enter the second string """).strip() UpperCAmelCase = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
88
1
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
1
"""Convert pretrained PyTorch checkpoints to their TensorFlow 2 equivalents.

For each supported architecture the script builds the TF 2 model from a config,
loads the PyTorch weights into it, optionally verifies that both models produce
(nearly) identical outputs, and saves the TF weights as an ``.h5`` file.
"""

import argparse
import os

from . import (
    ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
    DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
    DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AlbertConfig,
    BartConfig,
    BertConfig,
    CamembertConfig,
    CTRLConfig,
    DistilBertConfig,
    DPRConfig,
    ElectraConfig,
    FlaubertConfig,
    GPT2Config,
    LayoutLMConfig,
    LxmertConfig,
    OpenAIGPTConfig,
    RobertaConfig,
    T5Config,
    TFAlbertForPreTraining,
    TFBartForConditionalGeneration,
    TFBartForSequenceClassification,
    TFBertForPreTraining,
    TFBertForQuestionAnswering,
    TFBertForSequenceClassification,
    TFCamembertForMaskedLM,
    TFCTRLLMHeadModel,
    TFDistilBertForMaskedLM,
    TFDistilBertForQuestionAnswering,
    TFDPRContextEncoder,
    TFDPRQuestionEncoder,
    TFDPRReader,
    TFElectraForPreTraining,
    TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
    TFLayoutLMForMaskedLM,
    TFLxmertForPreTraining,
    TFLxmertVisualFeatureEncoder,
    TFOpenAIGPTLMHeadModel,
    TFRobertaForCausalLM,
    TFRobertaForMaskedLM,
    TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
    TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
    TFXLMRobertaForMaskedLM,
    TFXLMWithLMHeadModel,
    TFXLNetLMHeadModel,
    TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
    XLMConfig,
    XLMRobertaConfig,
    XLNetConfig,
    is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging


if is_torch_available():
    import numpy as np
    import torch

    from . import (
        AlbertForPreTraining,
        BartForConditionalGeneration,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        CamembertForMaskedLM,
        CTRLLMHeadModel,
        DistilBertForMaskedLM,
        DistilBertForQuestionAnswering,
        DPRContextEncoder,
        DPRQuestionEncoder,
        DPRReader,
        ElectraForPreTraining,
        FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
        LayoutLMForMaskedLM,
        LxmertForPreTraining,
        LxmertVisualFeatureEncoder,
        OpenAIGPTLMHeadModel,
        RobertaForMaskedLM,
        RobertaForSequenceClassification,
        T5ForConditionalGeneration,
        TransfoXLLMHeadModel,
        XLMRobertaForMaskedLM,
        XLMWithLMHeadModel,
        XLNetLMHeadModel,
    )


logging.set_verbosity_info()

# Maps a model-type key to (config class, TF class(es), PT class(es), archive map(s)).
# NOTE(review): entry lengths vary (e.g. "bart" and "dpr" carry extra classes) while
# convert_pt_checkpoint_to_tf unpacks 4 values and convert_all_pt_checkpoints_to_tf
# unpacks 5 — confirm each entry matches the unpacking for the model types actually used.
MODEL_CLASSES = {
    "bart": (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "bert": (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "bert-base-cased-finetuned-mrpc": (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "dpr": (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "gpt2": (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlnet": (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm": (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "xlm-roberta": (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "transfo-xl": (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "openai-gpt": (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "roberta": (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "layoutlm": (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    "roberta-large-mnli": (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "camembert": (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "flaubert": (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert": (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "distilbert-base-distilled-squad": (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert": (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "lxmert-visual-feature-encoder": (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "ctrl": (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "albert": (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "t5": (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "electra": (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    "wav2vec2": (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}


def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """Convert a single PyTorch checkpoint to TF 2 and save it as an h5 file.

    :param model_type: key into ``MODEL_CLASSES``
    :param pytorch_checkpoint_path: local path or shortcut name of the PT weights
    :param config_file: local path or shortcut name of the model config
    :param tf_dump_path: destination path for the ``.h5`` weights
    :param compare_with_pt_model: if True, assert PT/TF outputs differ by <= 2e-2
    :param use_cached_models: if False, force re-download of cached artifacts
    :raises ValueError: for an unknown ``model_type``
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert one model type (or all of them) for a list of checkpoints.

    :param args_model_type: single ``MODEL_CLASSES`` key, or ``None`` for all types
    :param tf_dump_path: directory where converted ``.h5`` files are written
    :param model_shortcut_names_or_path: checkpoints to convert (default: all known)
    :param config_shortcut_names_or_path: matching configs (default: same as models)
    :param remove_cached_files: delete downloaded PT files after conversion
    :param only_convert_finetuned_models: restrict to squad/mrpc/mnli checkpoints
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                # Finetuned shortcut names double as their own model type key.
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
88
"""In-place quicksort using a randomly chosen pivot."""
import random


def partition(a, left_index, right_index):
    """Lomuto-style partition of ``a[left_index:right_index]`` around ``a[left_index]``.

    Elements smaller than the pivot are moved left of it; returns the pivot's
    final index.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Place the pivot between the two partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """Sort ``a[left:right]`` in place (``right`` is exclusive)."""
    if left < right:
        pivot = random.randint(left, right - 1)  # random pivot to avoid worst-case inputs
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main():
    """Read comma-separated integers from stdin, sort them, and print the result."""
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
88
1
"""Two-pointer solution to the two-sum problem on a sorted array."""
from __future__ import annotations


def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices ``[i, j]`` with ``nums[i] + nums[j] == target``, else ``[]``.

    Assumes ``nums`` is sorted in ascending order — the pointers move inward
    based on whether the current pair sum is below or above the target.
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]  # hoisted so the sum is computed once per step
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i = i + 1
        else:
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"""{two_pointer([2, 7, 11, 15], 9) = }""")
88
"""The CodeEval metric estimates the pass@k metric for code synthesis (HumanEval)."""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np

import datasets

from .execute import check_correctness


_CITATION = """\
@misc{chen2021evaluating,
      title={Evaluating Large Language Models Trained on Code},
      author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
      year={2021},
      eprint={2107.03374},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}
"""

_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""

_KWARGS_DESCRIPTION = """
Calculates how good are predictions given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidates should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the canidate programs (Default: 4).
    timeout:
Returns:
    pass_at_k: dict with pass rates for each k
    results: dict with granular results of each unittest
Examples:
    >>> code_eval = datasets.load_metric(\"code_eval\")
    >>> test_cases = [\"assert add(2,3)==5\"]
    >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
    >>> print(pass_at_k)
    {'pass@1': 0.5, 'pass@2': 1.0}
"""

_WARNING = """
################################################################################
                                  !!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).

Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:

>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"

################################################################################\
"""

_LICENSE = """The MIT License

Copyright (c) OpenAI (https://openai.com)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    """Runs model-generated code candidates against unit tests and reports pass@k."""

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    # NOTE: the mutable list default for ``k`` is kept for API compatibility; it is
    # never mutated inside this method.
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores.

        Executes each candidate program concatenated with its test case in a
        thread pool, then aggregates per-task pass counts into pass@k.
        """
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            # Refuse to run untrusted code unless the user explicitly opted in.
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()  # order by completion_id for deterministic output
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k) without overflow."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
88
1
"""Tests for the MGP-STR processor (char tokenizer + ViT image processor)."""
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MgpstrProcessor, ViTImageProcessor


@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    # Image processor class under test (None when vision deps are missing).
    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # NOTE(review): ``image_processor_tester`` is never assigned in this class —
        # confirm this property is actually exercised or wire up a tester in setUp.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        """Write a minimal vocab and image-processor config into a temp dir."""
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_proc(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]

        self.assertListEqual(decoded_processor, decode_strs)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)

    def test_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)

        # Logits shaped (batch, seq_len, vocab) for the char / bpe / wordpiece heads.
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 5_0257)
        wp_input = torch.randn(1, 27, 3_0522)

        results = processor.batch_decode([char_input, bpe_input, wp_input])

        self.assertListEqual(
            list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"]
        )
88
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
1
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml UpperCAmelCase = NewType("""DataClass""", Any) UpperCAmelCase = NewType("""DataClassType""", Any) def _snake_case ( __snake_case : Dict ): """simple docstring""" if isinstance(__snake_case , __snake_case ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).' ) def _snake_case ( __snake_case : list ): """simple docstring""" _lowerCamelCase : int = {str(__snake_case ): choice for choice in choices} return lambda __snake_case : str_to_choice.get(__snake_case , __snake_case ) def _snake_case ( *, __snake_case : Union[str, List[str]] = None , __snake_case : str = None , __snake_case : Any = dataclasses.MISSING , __snake_case : Callable[[], Any] = dataclasses.MISSING , __snake_case : dict = None , **__snake_case : List[str] , ): """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _lowerCamelCase : str = {} if aliases is not None: _lowerCamelCase : Optional[Any] = aliases if help is not None: _lowerCamelCase : Any = help return dataclasses.field(metadata=__snake_case , default=__snake_case , default_factory=__snake_case , **__snake_case ) class lowercase__ ( A_ ): __UpperCAmelCase = 42 def __init__( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> Optional[Any]: # To make the default appear when using --help if "formatter_class" not in kwargs: _lowerCamelCase : 
Dict = ArgumentDefaultsHelpFormatter super().__init__(**SCREAMING_SNAKE_CASE) if dataclasses.is_dataclass(SCREAMING_SNAKE_CASE): _lowerCamelCase : Union[str, Any] = [dataclass_types] _lowerCamelCase : Tuple = list(SCREAMING_SNAKE_CASE) for dtype in self.dataclass_types: self._add_dataclass_arguments(SCREAMING_SNAKE_CASE) @staticmethod def UpperCamelCase_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : Dict = F'--{field.name}' _lowerCamelCase : int = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , SCREAMING_SNAKE_CASE): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""") _lowerCamelCase : Any = kwargs.pop("""aliases""" , []) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): _lowerCamelCase : Optional[Any] = [aliases] _lowerCamelCase : Optional[int] = getattr(field.type , """__origin__""" , field.type) if origin_type is Union or (hasattr(SCREAMING_SNAKE_CASE , """UnionType""") and isinstance(SCREAMING_SNAKE_CASE , types.UnionType)): if str not in field.type.__args__ and ( len(field.type.__args__) != 2 or type(SCREAMING_SNAKE_CASE) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" F' Problem encountered in field \'{field.name}\'.') if type(SCREAMING_SNAKE_CASE) not in field.type.__args__: # filter `str` in Union _lowerCamelCase : Optional[Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _lowerCamelCase : Union[str, Any] = getattr(field.type , """__origin__""" , field.type) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _lowerCamelCase : List[str] = ( field.type.__args__[0] if 
isinstance(SCREAMING_SNAKE_CASE , field.type.__args__[1]) else field.type.__args__[1] ) _lowerCamelCase : Tuple = getattr(field.type , """__origin__""" , field.type) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _lowerCamelCase : Dict = {} if origin_type is Literal or (isinstance(field.type , SCREAMING_SNAKE_CASE) and issubclass(field.type , SCREAMING_SNAKE_CASE)): if origin_type is Literal: _lowerCamelCase : Union[str, Any] = field.type.__args__ else: _lowerCamelCase : Optional[int] = [x.value for x in field.type] _lowerCamelCase : int = make_choice_type_function(kwargs["""choices"""]) if field.default is not dataclasses.MISSING: _lowerCamelCase : Optional[int] = field.default else: _lowerCamelCase : Optional[int] = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _lowerCamelCase : Dict = copy(SCREAMING_SNAKE_CASE) # Hack because type=bool in argparse does not behave as we want. _lowerCamelCase : List[str] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
_lowerCamelCase : List[str] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _lowerCamelCase : List[Any] = default # This tells argparse we accept 0 or 1 value after --field_name _lowerCamelCase : str = """?""" # This is the value that will get picked if we do --field_name (without value) _lowerCamelCase : Dict = True elif isclass(SCREAMING_SNAKE_CASE) and issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): _lowerCamelCase : Any = field.type.__args__[0] _lowerCamelCase : Optional[int] = """+""" if field.default_factory is not dataclasses.MISSING: _lowerCamelCase : int = field.default_factory() elif field.default is dataclasses.MISSING: _lowerCamelCase : Dict = True else: _lowerCamelCase : List[Any] = field.type if field.default is not dataclasses.MISSING: _lowerCamelCase : Any = field.default elif field.default_factory is not dataclasses.MISSING: _lowerCamelCase : List[Any] = field.default_factory() else: _lowerCamelCase : int = True parser.add_argument(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): _lowerCamelCase : Tuple = False parser.add_argument(F'--no_{field.name}' , action="""store_false""" , dest=field.name , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Any: if hasattr(SCREAMING_SNAKE_CASE , """_argument_group_name"""): _lowerCamelCase : List[Any] = self.add_argument_group(dtype._argument_group_name) else: _lowerCamelCase : str = self try: _lowerCamelCase : Dict[str, type] = get_type_hints(SCREAMING_SNAKE_CASE) except NameError: raise RuntimeError( F'Type resolution failed for {dtype}. Try declaring the class in global scope or ' """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""") except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(SCREAMING_SNAKE_CASE): _lowerCamelCase : Optional[int] = """.""".join(map(SCREAMING_SNAKE_CASE , sys.version_info[:3])) raise RuntimeError( F'Type resolution failed for {dtype} on Python {python_version}. Try removing ' """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""") from ex raise for field in dataclasses.fields(SCREAMING_SNAKE_CASE): if not field.init: continue _lowerCamelCase : Union[str, Any] = type_hints[field.name] self._parse_dataclass_field(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , ) -> Tuple[DataClass, ...]: if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): _lowerCamelCase : str = [] if args_filename: args_files.append(Path(SCREAMING_SNAKE_CASE)) elif look_for_args_file and len(sys.argv): args_files.append(Path(sys.argv[0]).with_suffix(""".args""")) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _lowerCamelCase : Optional[Any] = ArgumentParser() args_file_parser.add_argument(SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , action="""append""") # Use only remaining args for further parsing (remove the args_file_flag) _lowerCamelCase , _lowerCamelCase : int = args_file_parser.parse_known_args(args=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = vars(SCREAMING_SNAKE_CASE).get(args_file_flag.lstrip("""-""") , SCREAMING_SNAKE_CASE) if cmd_args_file_paths: args_files.extend([Path(SCREAMING_SNAKE_CASE) for p in cmd_args_file_paths]) _lowerCamelCase : List[str] = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _lowerCamelCase : Optional[int] = file_args + args if args is not None else file_args + sys.argv[1:] 
_lowerCamelCase , _lowerCamelCase : Tuple = self.parse_known_args(args=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = [] for dtype in self.dataclass_types: _lowerCamelCase : str = {f.name for f in dataclasses.fields(SCREAMING_SNAKE_CASE) if f.init} _lowerCamelCase : str = {k: v for k, v in vars(SCREAMING_SNAKE_CASE).items() if k in keys} for k in keys: delattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = dtype(**SCREAMING_SNAKE_CASE) outputs.append(SCREAMING_SNAKE_CASE) if len(namespace.__dict__) > 0: # additional namespace. outputs.append(SCREAMING_SNAKE_CASE) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(F'Some specified arguments are not used by the HfArgumentParser: {remaining_args}') return (*outputs,) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]: _lowerCamelCase : List[str] = set(args.keys()) _lowerCamelCase : List[Any] = [] for dtype in self.dataclass_types: _lowerCamelCase : Dict = {f.name for f in dataclasses.fields(SCREAMING_SNAKE_CASE) if f.init} _lowerCamelCase : Union[str, Any] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) _lowerCamelCase : Any = dtype(**SCREAMING_SNAKE_CASE) outputs.append(SCREAMING_SNAKE_CASE) if not allow_extra_keys and unused_keys: raise ValueError(F'Some keys are not used by the HfArgumentParser: {sorted(SCREAMING_SNAKE_CASE)}') return tuple(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]: with open(Path(SCREAMING_SNAKE_CASE) , encoding="""utf-8""") as open_json_file: _lowerCamelCase : Optional[int] = json.loads(open_json_file.read()) _lowerCamelCase : Optional[Any] = self.parse_dict(SCREAMING_SNAKE_CASE , allow_extra_keys=SCREAMING_SNAKE_CASE) return tuple(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE = False) -> Tuple[DataClass, ...]: _lowerCamelCase : Optional[int] = self.parse_dict(yaml.safe_load(Path(SCREAMING_SNAKE_CASE).read_text()) , allow_extra_keys=SCREAMING_SNAKE_CASE) return tuple(SCREAMING_SNAKE_CASE)
88
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict UpperCAmelCase = namedtuple( """_TestCommandArgs""", [ """dataset""", """name""", """cache_dir""", """data_dir""", """all_configs""", """save_infos""", """ignore_verifications""", """force_redownload""", """clear_cache""", ], defaults=[None, None, None, False, False, False, False, False], ) def _snake_case ( __snake_case : int , __snake_case : str ): """simple docstring""" return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def _snake_case ( __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = _TestCommandArgs(dataset=__snake_case , all_configs=__snake_case , save_infos=__snake_case ) _lowerCamelCase : Union[str, Any] = TestCommand(*__snake_case ) test_command.run() _lowerCamelCase : List[str] = os.path.join(__snake_case , """README.md""" ) assert os.path.exists(__snake_case ) _lowerCamelCase : Dict = DatasetInfosDict.from_directory(__snake_case ) _lowerCamelCase : int = DatasetInfosDict( { """default""": DatasetInfo( features=Features( { """tokens""": Sequence(Value("""string""" ) ), """ner_tags""": Sequence( ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ), """langs""": Sequence(Value("""string""" ) ), """spans""": Sequence(Value("""string""" ) ), } ) , splits=[ { """name""": """train""", """num_bytes""": 2351563, """num_examples""": 10000, }, { """name""": """validation""", """num_bytes""": 238418, """num_examples""": 1000, }, ] , download_size=3940680 , dataset_size=2589981 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: _lowerCamelCase , _lowerCamelCase : Any = getattr(dataset_infos["""default"""] , __snake_case ), 
getattr(expected_dataset_infos["""default"""] , __snake_case ) if key == "num_bytes": assert is_apercent_close(__snake_case , __snake_case ) elif key == "splits": assert list(__snake_case ) == list(__snake_case ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
88
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
1
"""simple docstring""" import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : List[Any] ): """simple docstring""" _lowerCamelCase : Tuple = LxmertConfig.from_json_file(__snake_case ) print(F'Building PyTorch model from configuration: {config}' ) _lowerCamelCase : Any = LxmertForPreTraining(__snake_case ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(__snake_case , __snake_case , __snake_case ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , __snake_case ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) UpperCAmelCase = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
88
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
1
"""Fine-tuning utilities for Flax BigBird on Natural Questions.

Defines the QA model with an extra 5-way answer-category head, the loss,
TPU-friendly data collation, pmapped train/eval steps, and checkpointing.
"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable

import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm

from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule


class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an additional 5-way classification head.

    Natural Questions labels each example with an answer category, so on top
    of the start/end span logits we predict a category from the pooled output.
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # 5 answer categories for Natural Questions.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        # outputs[2] is the pooled output (add_pooling_layer=True above).
        cls_out = self.cls(outputs[2])
        # (start_logits, end_logits, category_logits)
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    """Return the mean of start-token, end-token and category cross-entropy losses."""

    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        # One-hot encode the integer labels against the logits' last axis.
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3


@dataclass
class Args:
    """Training hyper-parameters and paths."""

    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        # Global batch size across all local devices.
        self.batch_size = self.batch_size_per_device * jax.device_count()


@dataclass
class DataCollator:
    """Pads every sample to ``max_length`` and shards batches across devices."""

    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        # Transpose [(ids, mask), ...] -> (all_ids, all_masks).
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask


def get_batched_dataset(dataset, batch_size, seed=None):
    """Yield dict batches of ``batch_size``; shuffles first when ``seed`` is given.

    Trailing samples that do not fill a batch are dropped.
    """
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    """One pmapped optimization step; returns (state, metrics, fresh dropout rng)."""

    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    """Pmapped evaluation step; returns the pmean'd loss metrics."""
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    # The loss callable is static configuration, not a pytree leaf.
    loss_fn: Callable = struct.field(pytree_node=False)


@dataclass
class Trainer:
    """Thin training harness wiring model, collator, pmapped steps and logging."""

    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        """Build a (possibly checkpoint-restored) replicated TrainState."""
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            # Rebuild the state at the restored step with the restored optimizer state.
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        """Run the full training loop with periodic eval/logging and checkpointing."""
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            # Reshuffle each epoch with a distinct seed.
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        """Return the mean eval loss over all full batches of ``dataset``."""
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        """Persist model params, optimizer state, args, collator and step counter."""
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    """Load everything ``save_checkpoint`` wrote; returns (params, opt_state, step, args, data_collator)."""
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """Linear warmup from ``init_lr`` to ``lr``, then linear decay to ~0."""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """Build the AdamW optimizer (decay masked off biases/LayerNorm scales) and its schedule."""

    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Keys of the flattened dict are path tuples; exclude biases and
        # LayerNorm scales from weight decay. (Fixed: the mask must inspect
        # the key ``k``, not the parameter array ``v``.)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
88
"""Tests for the Stable Diffusion SAG (Self-Attention Guidance) pipeline."""
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests with tiny randomly-initialized components."""

    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a minimal set of tiny pipeline components (deterministic seeds)."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS has no device-bound generators; fall back to the global CPU one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against released checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        # Non-square output exercises the width/height plumbing of SAG.
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
88
1
"""Tests for the Kandinsky 2.2 ControlNet image-to-image pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests with tiny randomly-initialized components."""

    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against released checkpoints."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
88
"""simple docstring""" import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import MaskaFormerConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel if is_vision_available(): from transformers import MaskaFormerImageProcessor if is_vision_available(): from PIL import Image class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=32 * 8 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=64 , ) -> Optional[int]: _lowerCamelCase : List[str] = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = is_training _lowerCamelCase : Tuple = use_auxiliary_loss _lowerCamelCase : Any = num_queries _lowerCamelCase : List[str] = num_channels _lowerCamelCase : List[str] = min_size _lowerCamelCase : Tuple = max_size _lowerCamelCase : str = num_labels _lowerCamelCase : Any = hidden_dim _lowerCamelCase : Dict = hidden_dim def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to( SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=SCREAMING_SNAKE_CASE) > 0.5 ).float() 
_lowerCamelCase : Dict = (torch.rand((self.batch_size, self.num_labels) , device=SCREAMING_SNAKE_CASE) > 0.5).long() _lowerCamelCase : Optional[int] = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def UpperCamelCase_ ( self) -> str: _lowerCamelCase : List[str] = MaskaFormerConfig( hidden_size=self.hidden_dim , ) _lowerCamelCase : Any = self.num_queries _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = [1, 1, 1, 1] _lowerCamelCase : Any = self.num_channels _lowerCamelCase : Optional[Any] = 64 _lowerCamelCase : str = 128 _lowerCamelCase : Optional[Any] = self.hidden_dim _lowerCamelCase : Any = self.hidden_dim _lowerCamelCase : List[Any] = self.hidden_dim return config def UpperCamelCase_ ( self) -> Any: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : List[str] = self.prepare_config_and_inputs() _lowerCamelCase : str = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask} return config, inputs_dict def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : str = output.encoder_hidden_states _lowerCamelCase : int = output.pixel_decoder_hidden_states _lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths)) self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]: with torch.no_grad(): _lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , 
output_hidden_states=SCREAMING_SNAKE_CASE) self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(output.encoder_last_hidden_state is not None) if output_hidden_states: self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() def comm_check_on_output(SCREAMING_SNAKE_CASE): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None) self.parent.assertTrue(result.encoder_last_hidden_state is not None) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1)) with torch.no_grad(): _lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = model( pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) comm_check_on_output(SCREAMING_SNAKE_CASE) self.parent.assertTrue(result.loss is not None) 
self.parent.assertEqual(result.loss.shape , torch.Size([1])) @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else () __UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {} __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Optional[int] = MaskaFormerModelTester(self) _lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> int: _lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Mask2Former does not use inputs_embeds""") def UpperCamelCase_ ( self) -> Optional[int]: pass @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""") def UpperCamelCase_ ( self) -> Tuple: pass @unittest.skip(reason="""Mask2Former is not a generative model""") def UpperCamelCase_ ( self) -> List[Any]: pass @unittest.skip(reason="""Mask2Former does not use token embeddings""") def UpperCamelCase_ ( self) -> Any: pass @require_torch_multi_gpu @unittest.skip( reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""") def UpperCamelCase_ ( self) -> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""") 
def UpperCamelCase_ ( self) -> Optional[int]: pass def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : str = [*signature.parameters.keys()] _lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Optional[int]: for model_name in ["facebook/mask2former-swin-small-coco-instance"]: _lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Dict = (self.model_tester.min_size,) * 2 _lowerCamelCase : str = { """pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE), """mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE), """class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(), } _lowerCamelCase : List[str] = self.model_tester.get_config() _lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None) def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase 
: str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE) self.assertTrue(outputs.attentions is not None) def UpperCamelCase_ ( self) -> Optional[Any]: if not self.model_tester.is_training: return _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss loss.backward() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Any = self.all_model_classes[1] _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() _lowerCamelCase : int = True _lowerCamelCase : Optional[Any] = True _lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) model.train() _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() _lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() _lowerCamelCase : Optional[int] = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(pixel_decoder_hidden_states.grad) self.assertIsNotNone(transformer_decoder_hidden_states.grad) self.assertIsNotNone(attentions.grad) UpperCAmelCase = 1e-4 def _snake_case ( 
): """simple docstring""" _lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_vision @slow class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return "facebook/mask2former-swin-small-coco-instance" @cached_property def UpperCamelCase_ ( self) -> Union[str, Any]: return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : List[str] = prepare_img() _lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = torch.tensor( [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Any = torch.tensor( [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) _lowerCamelCase : Dict = torch.tensor( [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE) self.assertTrue( torch.allclose( 
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : Optional[Any] = self.default_image_processor _lowerCamelCase : Any = prepare_img() _lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0) # check size self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384)) with torch.no_grad(): _lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE) # masks_queries_logits _lowerCamelCase : str = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)) _lowerCamelCase : Any = [ [-8.78_39, -9.00_56, -8.81_21], [-7.41_04, -7.03_13, -6.54_01], [-6.61_05, -6.34_27, -6.46_75], ] _lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) # class_queries_logits _lowerCamelCase : List[str] = outputs.class_queries_logits self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1)) _lowerCamelCase : Optional[Any] = torch.tensor( [ [1.83_24, -8.08_35, -4.19_22], [0.84_50, -9.00_50, -3.60_53], [0.30_45, -7.72_93, -3.02_75], ]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Tuple = 
MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval() _lowerCamelCase : str = self.default_image_processor _lowerCamelCase : Tuple = image_processor( [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , ) _lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]] _lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]] with torch.no_grad(): _lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE) self.assertTrue(outputs.loss is not None)
88
1
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : int ): """simple docstring""" _lowerCamelCase : str = word.split() def justify(__snake_case : list , __snake_case : int , __snake_case : int ) -> str: _lowerCamelCase : str = max_width - width _lowerCamelCase : List[str] = len(__snake_case ) if len(__snake_case ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: _lowerCamelCase : Tuple = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] _lowerCamelCase : Dict = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] _lowerCamelCase : List[str] = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(__snake_case ): num_spaces_between_words_list[i] += 1 _lowerCamelCase : List[Any] = [] for i in range(__snake_case ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * """ """ ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(__snake_case ) _lowerCamelCase : Dict = [] _lowerCamelCase : list[str] = [] _lowerCamelCase : Optional[int] = 0 for word in words: if width + len(__snake_case ) + len(__snake_case ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(__snake_case ) width += len(__snake_case ) else: # justify the line and add it to result answer.append(justify(__snake_case , __snake_case , __snake_case ) ) # reset new line and new width _lowerCamelCase , 
_lowerCamelCase : Dict = [word], len(__snake_case ) _lowerCamelCase : Any = max_width - width - len(__snake_case ) answer.append(""" """.join(__snake_case ) + (remaining_spaces + 1) * """ """ ) return answer if __name__ == "__main__": from doctest import testmod testmod()
88
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", 
"""FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), 
("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) UpperCAmelCase = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", 
"""FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) UpperCAmelCase = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) UpperCAmelCase = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) UpperCAmelCase = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) UpperCAmelCase = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) UpperCAmelCase = 
_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModel) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class lowercase__ ( _BaseAutoModelClass ): 
__UpperCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class lowercase__ ( _BaseAutoModelClass ): __UpperCAmelCase = 
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
88
1
"""simple docstring""" import unittest from transformers import BigBirdTokenizer, BigBirdTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase = """▁""" UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class lowercase__ ( A_ ,unittest.TestCase ): __UpperCAmelCase = BigBirdTokenizer __UpperCAmelCase = BigBirdTokenizerFast __UpperCAmelCase = True __UpperCAmelCase = True def UpperCamelCase_ ( self) -> Any: super().setUp() _lowerCamelCase : Optional[Any] = self.tokenizer_class(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE) tokenizer.save_pretrained(self.tmpdirname) def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Tuple = """<s>""" _lowerCamelCase : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : int = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<unk>""") self.assertEqual(vocab_keys[1] , """<s>""") self.assertEqual(vocab_keys[-1] , """[MASK]""") self.assertEqual(len(SCREAMING_SNAKE_CASE) , 1004) def UpperCamelCase_ ( self) -> List[Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1000) def UpperCamelCase_ ( self) -> int: if not self.test_rust_tokenizer: return _lowerCamelCase : Optional[int] = self.get_tokenizer() _lowerCamelCase : Union[str, Any] = self.get_rust_tokenizer() _lowerCamelCase : Dict = """I was born in 92000, and this is falsé.""" _lowerCamelCase : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) _lowerCamelCase : str = self.get_rust_tokenizer() _lowerCamelCase : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = rust_tokenizer.encode(SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = BigBirdTokenizer(SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = tokenizer.tokenize("""This is a test""") self.assertListEqual(SCREAMING_SNAKE_CASE , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE) , [285, 46, 10, 170, 382] , ) _lowerCamelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") self.assertListEqual( SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) _lowerCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE) self.assertListEqual( SCREAMING_SNAKE_CASE , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) _lowerCamelCase : str = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE) self.assertListEqual( SCREAMING_SNAKE_CASE , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", 
"""<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def UpperCamelCase_ ( self) -> List[Any]: return BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""") @slow def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[int] = """Hello World!""" _lowerCamelCase : Tuple = [65, 1_8536, 2260, 101, 66] self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE)) @slow def UpperCamelCase_ ( self) -> Tuple: _lowerCamelCase : Optional[int] = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth""" ) # fmt: off _lowerCamelCase : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231 # fmt: on self.assertListEqual(SCREAMING_SNAKE_CASE , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE)) @require_torch @slow def UpperCamelCase_ ( self) -> Tuple: import torch from transformers import BigBirdConfig, BigBirdModel # Build sequence _lowerCamelCase : List[str] = list(self.big_tokenizer.get_vocab().keys())[:10] _lowerCamelCase : str = """ """.join(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = self.big_tokenizer.encode_plus(SCREAMING_SNAKE_CASE , return_tensors="""pt""" , return_token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = self.big_tokenizer.batch_encode_plus( [sequence + """ """ + sequence] , return_tensors="""pt""" , return_token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = 
BigBirdConfig(attention_type="""original_full""") _lowerCamelCase : Optional[Any] = BigBirdModel(SCREAMING_SNAKE_CASE) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**SCREAMING_SNAKE_CASE) model(**SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Dict = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""") _lowerCamelCase : Any = tokenizer.decode(tokenizer("""Paris is the [MASK].""").input_ids) self.assertTrue(decoded_text == """[CLS] Paris is the[MASK].[SEP]""") @slow def UpperCamelCase_ ( self) -> Union[str, Any]: # fmt: off _lowerCamelCase : Optional[int] = {"""input_ids""": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE , model_name="""google/bigbird-roberta-base""" , revision="""215c99f1600e06f83acce68422f2035b2b5c3510""" , )
88
"""simple docstring""" # limitations under the License. # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401 from .utils import deprecate deprecate( """pipelines_utils""", """0.22.0""", """Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""", standard_warn=False, stacklevel=3, )
88
1
"""simple docstring""" import inspect import unittest import numpy as np from transformers import ViTConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel class lowercase__ ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , ) -> Dict: _lowerCamelCase : int = parent _lowerCamelCase : List[Any] = batch_size _lowerCamelCase : Tuple = image_size _lowerCamelCase : Dict = patch_size _lowerCamelCase : Union[str, Any] = num_channels _lowerCamelCase : List[Any] = is_training _lowerCamelCase : Any = use_labels _lowerCamelCase : List[Any] = hidden_size _lowerCamelCase : Any = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : Union[str, Any] = intermediate_size _lowerCamelCase : Any = hidden_act _lowerCamelCase : Optional[Any] = hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : str = type_sequence_label_size _lowerCamelCase : List[str] = initializer_range # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 _lowerCamelCase : Any = num_patches + 1 def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) 
_lowerCamelCase : int = ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) return config, pixel_values def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : Dict = FlaxViTModel(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) _lowerCamelCase : Dict = (self.image_size, self.image_size) _lowerCamelCase : int = (self.patch_size, self.patch_size) _lowerCamelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : str = self.type_sequence_label_size _lowerCamelCase : List[Any] = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _lowerCamelCase : Dict = 1 _lowerCamelCase : int = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( 
_lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_flax class lowercase__ ( A_ ,unittest.TestCase ): __UpperCAmelCase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else () def UpperCamelCase_ ( self) -> None: _lowerCamelCase : Dict = FlaxViTModelTester(self) _lowerCamelCase : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37) def UpperCamelCase_ ( self) -> int: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _lowerCamelCase : int = model_class(SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCamelCase : Optional[int] = [*signature.parameters.keys()] _lowerCamelCase : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase , _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): _lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE) @jax.jit def model_jitted(SCREAMING_SNAKE_CASE , 
**SCREAMING_SNAKE_CASE): return model(pixel_values=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) with self.subTest("""JIT Enabled"""): _lowerCamelCase : List[str] = model_jitted(**SCREAMING_SNAKE_CASE).to_tuple() with self.subTest("""JIT Disabled"""): with jax.disable_jit(): _lowerCamelCase : List[Any] = model_jitted(**SCREAMING_SNAKE_CASE).to_tuple() self.assertEqual(len(SCREAMING_SNAKE_CASE) , len(SCREAMING_SNAKE_CASE)) for jitted_output, output in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): self.assertEqual(jitted_output.shape , output.shape) @slow def UpperCamelCase_ ( self) -> List[str]: for model_class_name in self.all_model_classes: _lowerCamelCase : Tuple = model_class_name.from_pretrained("""google/vit-base-patch16-224""") _lowerCamelCase : List[Any] = model(np.ones((1, 3, 224, 224))) self.assertIsNotNone(SCREAMING_SNAKE_CASE)
88
"""simple docstring""" def _snake_case ( __snake_case : list[list[int]] , __snake_case : int , __snake_case : int , __snake_case : list[int] ): """simple docstring""" if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def _snake_case ( __snake_case : list[list[int]] , __snake_case : list[int] , __snake_case : int ): """simple docstring""" if curr_ind == len(__snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(__snake_case ) ): if valid_connection(__snake_case , __snake_case , __snake_case , __snake_case ): # Insert current vertex into path as next transition _lowerCamelCase : List[str] = next_ver # Validate created path if util_hamilton_cycle(__snake_case , __snake_case , curr_ind + 1 ): return True # Backtrack _lowerCamelCase : Tuple = -1 return False def _snake_case ( __snake_case : list[list[int]] , __snake_case : int = 0 ): """simple docstring""" _lowerCamelCase : Any = [-1] * (len(__snake_case ) + 1) # initialize start and end of path with starting index _lowerCamelCase : Optional[int] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(__snake_case , __snake_case , 1 ) else []
88
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_xlnet import XLNetTokenizer else: UpperCAmelCase = None UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", }, """tokenizer_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""", }, } UpperCAmelCase = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } UpperCAmelCase = """▁""" # Segments (not really needed) UpperCAmelCase = 0 UpperCAmelCase = 1 UpperCAmelCase = 2 UpperCAmelCase = 3 UpperCAmelCase = 4 class lowercase__ ( A_ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = '''left''' __UpperCAmelCase = XLNetTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<sep>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE="<cls>" , SCREAMING_SNAKE_CASE="<mask>" , SCREAMING_SNAKE_CASE=["<eop>", "<eod>"] , **SCREAMING_SNAKE_CASE , ) -> Dict: # Mask token behave like a normal word, i.e. 
include the space before it _lowerCamelCase : Any = AddedToken(SCREAMING_SNAKE_CASE , lstrip=SCREAMING_SNAKE_CASE , rstrip=SCREAMING_SNAKE_CASE) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) else mask_token super().__init__( vocab_file=SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , remove_space=SCREAMING_SNAKE_CASE , keep_accents=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , additional_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Optional[Any] = 3 _lowerCamelCase : Optional[Any] = do_lower_case _lowerCamelCase : str = remove_space _lowerCamelCase : Tuple = keep_accents _lowerCamelCase : List[Any] = vocab_file _lowerCamelCase : int = False if not self.vocab_file else True def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : List[str] = [self.sep_token_id] _lowerCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : Optional[int] = [self.sep_token_id] _lowerCamelCase : str = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""") if not os.path.isdir(SCREAMING_SNAKE_CASE): logger.error(F'Vocabulary path ({save_directory}) 
should be a directory') return _lowerCamelCase : str = os.path.join( SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE): copyfile(self.vocab_file , SCREAMING_SNAKE_CASE) return (out_vocab_file,)
88
"""simple docstring""" import mpmath # for roots of unity import numpy as np class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None) -> Tuple: # Input as list _lowerCamelCase : Any = list(poly_a or [0])[:] _lowerCamelCase : Optional[Any] = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() _lowerCamelCase : int = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() _lowerCamelCase : Union[str, Any] = len(self.polyB) # Add 0 to make lengths equal a power of 2 _lowerCamelCase : List[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform _lowerCamelCase : Optional[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1)) # The product _lowerCamelCase : int = self.__multiply() def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : Dict = [[x] for x in self.polyA] if which == """A""" else [[x] for x in self.polyB] # Corner case if len(SCREAMING_SNAKE_CASE) <= 1: return dft[0] # _lowerCamelCase : str = self.c_max_length // 2 while next_ncol > 0: _lowerCamelCase : Dict = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : Tuple = self.root**next_ncol # First half of next step _lowerCamelCase : int = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step _lowerCamelCase : Optional[int] = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(SCREAMING_SNAKE_CASE): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update _lowerCamelCase : Union[str, Any] = new_dft _lowerCamelCase : List[str] = next_ncol // 2 return dft[0] def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : Optional[Any] = self.__dft("""A""") _lowerCamelCase : List[str] = self.__dft("""B""") _lowerCamelCase : List[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT _lowerCamelCase : List[str] = 2 while next_ncol <= self.c_max_length: _lowerCamelCase : Any = [[] for i in range(SCREAMING_SNAKE_CASE)] _lowerCamelCase : List[Any] = self.root ** (next_ncol // 2) _lowerCamelCase : str = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update _lowerCamelCase : Any = new_inverse_c next_ncol *= 2 # Unpack _lowerCamelCase : Optional[Any] = [round(x[0].real , 8) + round(x[0].imag , 8) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Any: _lowerCamelCase : Dict = """A = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) _lowerCamelCase : List[Any] = """B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) _lowerCamelCase : int = """A*B = """ + """ + """.join( F'{coef}*x^{i}' for coef, i in enumerate(self.product)) return F'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger(__name__) def _snake_case ( __snake_case : Optional[Any] , __snake_case : Union[str, Any]=False ): """simple docstring""" _lowerCamelCase : str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) 
if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _lowerCamelCase : Optional[int] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def _snake_case ( __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : Tuple=False ): """simple docstring""" for i in range(config.num_hidden_layers ): if base_model: _lowerCamelCase : List[Any] = """""" else: _lowerCamelCase : Any = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _lowerCamelCase : str = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) _lowerCamelCase : Optional[Any] = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase : Any = in_proj_weight[ : config.hidden_size, : ] _lowerCamelCase : List[Any] = in_proj_bias[: config.hidden_size] _lowerCamelCase : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _lowerCamelCase : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _lowerCamelCase : int = in_proj_weight[ -config.hidden_size :, : ] _lowerCamelCase : Dict = in_proj_bias[-config.hidden_size :] def _snake_case ( __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : Tuple = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(__snake_case , __snake_case ) def _snake_case ( __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Union[str, Any] = 
dct.pop(__snake_case ) _lowerCamelCase : Any = val def _snake_case ( ): """simple docstring""" _lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _lowerCamelCase : int = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ) return im @torch.no_grad() def _snake_case ( __snake_case : int , __snake_case : str , __snake_case : Any=True ): """simple docstring""" _lowerCamelCase : Union[str, Any] = ViTConfig() # patch_size if model_name[-1] == "8": _lowerCamelCase : str = 8 # set labels if required if not base_model: _lowerCamelCase : List[str] = 1000 _lowerCamelCase : List[Any] = """huggingface/label-files""" _lowerCamelCase : Any = """imagenet-1k-id2label.json""" _lowerCamelCase : int = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="""dataset""" ) , """r""" ) ) _lowerCamelCase : Optional[Any] = {int(__snake_case ): v for k, v in idalabel.items()} _lowerCamelCase : Tuple = idalabel _lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: _lowerCamelCase : Union[str, Any] = 384 _lowerCamelCase : Tuple = 1536 _lowerCamelCase : List[str] = 12 _lowerCamelCase : Dict = 6 # load original model from torch hub _lowerCamelCase : Any = torch.hub.load("""facebookresearch/dino:main""" , __snake_case ) original_model.eval() # load state_dict of original model, remove and rename some keys _lowerCamelCase : Any = original_model.state_dict() if base_model: remove_classification_head_(__snake_case ) _lowerCamelCase : Union[str, Any] = create_rename_keys(__snake_case , base_model=__snake_case ) for src, dest in rename_keys: rename_key(__snake_case , __snake_case , __snake_case ) read_in_q_k_v(__snake_case , __snake_case , __snake_case ) # load HuggingFace model if base_model: _lowerCamelCase : Optional[Any] = ViTModel(__snake_case , add_pooling_layer=__snake_case ).eval() else: _lowerCamelCase : Optional[Any] = 
ViTForImageClassification(__snake_case ).eval() model.load_state_dict(__snake_case ) # Check outputs on an image, prepared by ViTImageProcessor _lowerCamelCase : Dict = ViTImageProcessor() _lowerCamelCase : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) _lowerCamelCase : Optional[int] = encoding["""pixel_values"""] _lowerCamelCase : str = model(__snake_case ) if base_model: _lowerCamelCase : Optional[int] = original_model(__snake_case ) assert torch.allclose(__snake_case , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: _lowerCamelCase : Optional[Any] = original_model(__snake_case ) assert logits.shape == outputs.logits.shape assert torch.allclose(__snake_case , outputs.logits , atol=1E-3 ) Path(__snake_case ).mkdir(exist_ok=__snake_case ) print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(__snake_case ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(__snake_case ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) UpperCAmelCase = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
88
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) UpperCAmelCase = { """configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""VisionEncoderDecoderModel"""] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""TFVisionEncoderDecoderModel"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""FlaxVisionEncoderDecoderModel"""] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
88
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class lowercase__ ( A_ ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Tuple: super().__init__() # make sure scheduler can always be converted to DDIM _lowerCamelCase : List[Any] = DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]: # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE): _lowerCamelCase : int = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: _lowerCamelCase : List[str] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) and len(SCREAMING_SNAKE_CASE) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE)}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.') _lowerCamelCase : int = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output _lowerCamelCase : int = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _lowerCamelCase : Optional[int] = self.scheduler.step( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE).prev_sample _lowerCamelCase : List[str] = (image / 2 + 0.5).clamp(0 , 1) _lowerCamelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _lowerCamelCase : Tuple = self.numpy_to_pil(SCREAMING_SNAKE_CASE) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE)
88
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def _snake_case ( __snake_case : List[str] ): """simple docstring""" for param in module.parameters(): _lowerCamelCase : Optional[Any] = False def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): _lowerCamelCase : Any = """mps""" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def _snake_case ( __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : int = plt.imshow(__snake_case ) fig.axes.get_xaxis().set_visible(__snake_case ) fig.axes.get_yaxis().set_visible(__snake_case ) plt.show() def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = datetime.now() _lowerCamelCase : Optional[Any] = current_time.strftime("""%H:%M:%S""" ) return timestamp
88
1
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") class lowercase__ ( A_ ,unittest.TestCase ): __UpperCAmelCase = BartphoTokenizer __UpperCAmelCase = False __UpperCAmelCase = True def UpperCamelCase_ ( self) -> Optional[int]: super().setUp() _lowerCamelCase : Tuple = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] _lowerCamelCase : Union[str, Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE)))) _lowerCamelCase : int = {"""unk_token""": """<unk>"""} _lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""]) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""") as fp: for token in vocab_tokens: fp.write(F'{token} {vocab_tokens[token]}\n') _lowerCamelCase : str = BartphoTokenizer(SCREAMING_SNAKE_CASE , self.monolingual_vocab_file , **self.special_tokens_map) tokenizer.save_pretrained(self.tmpdirname) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> List[str]: kwargs.update(self.special_tokens_map) return BartphoTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : List[str] = """This is a là test""" _lowerCamelCase : str = """This is a<unk><unk> test""" return input_text, output_text def UpperCamelCase_ ( self) -> Dict: _lowerCamelCase : Tuple = BartphoTokenizer(SCREAMING_SNAKE_CASE , self.monolingual_vocab_file , **self.special_tokens_map) _lowerCamelCase : Tuple = """This is a là test""" _lowerCamelCase : List[str] = """▁This ▁is ▁a ▁l à ▁t est""".split() _lowerCamelCase : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) 
_lowerCamelCase : Union[str, Any] = tokens + [tokenizer.unk_token] _lowerCamelCase : Tuple = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE) , SCREAMING_SNAKE_CASE)
88
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version UpperCAmelCase = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") @dataclass class lowercase__ : __UpperCAmelCase = field( default='''cifar10''' ,metadata={'''help''': '''Name of a dataset from the datasets package'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''The column name of the images in the files.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the training data.'''} ) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''A folder containing the validation data.'''} ) __UpperCAmelCase = field( default=0.1_5 ,metadata={'''help''': '''Percent to split off of train for validation.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''For debugging 
purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } ,) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = {} if self.train_dir is not None: _lowerCamelCase : int = self.train_dir if self.validation_dir is not None: _lowerCamelCase : Tuple = self.validation_dir _lowerCamelCase : Optional[int] = data_files if data_files else None @dataclass class lowercase__ : __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Pretrained config name or path if not the same as model_name_or_path'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } ,) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} ) __UpperCAmelCase = field( default='''main''' ,metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} ,) __UpperCAmelCase = field(default=A_ ,metadata={'''help''': '''Name or path of preprocessor config.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } ,) __UpperCAmelCase = field( default=0.7_5 ,metadata={'''help''': '''The ratio of the number of masked tokens in the input sequence.'''} ) __UpperCAmelCase = field( default=A_ ,metadata={'''help''': '''Whether or not to train with normalized pixel values as target.'''} ) @dataclass class lowercase__ ( A_ ): __UpperCAmelCase = field( default=1e-3 ,metadata={'''help''': '''Base learning rate: absolute_lr 
= base_lr * total_batch_size / 256.'''} ) def _snake_case ( __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : int = torch.stack([example["""pixel_values"""] for example in examples] ) return {"pixel_values": pixel_values} def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_mae""" , __snake_case , __snake_case ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(__snake_case ) transformers.utils.logging.set_verbosity(__snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset. _lowerCamelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
_lowerCamelCase : Tuple = None if """validation""" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __snake_case ) and data_args.train_val_split > 0.0: _lowerCamelCase : List[str] = ds["""train"""].train_test_split(data_args.train_val_split ) _lowerCamelCase : Union[str, Any] = split["""train"""] _lowerCamelCase : Optional[int] = split["""test"""] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : str = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Optional[Any] = ViTMAEConfig() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) # adapt config config.update( { """mask_ratio""": model_args.mask_ratio, """norm_pix_loss""": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : str = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__snake_case ) elif model_args.model_name_or_path: _lowerCamelCase : Dict = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__snake_case ) else: _lowerCamelCase : Union[str, Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : List[Any] = ViTMAEForPreTraining.from_pretrained( 
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _lowerCamelCase : Union[str, Any] = ViTMAEForPreTraining(__snake_case ) if training_args.do_train: _lowerCamelCase : List[Any] = ds["""train"""].column_names else: _lowerCamelCase : Union[str, Any] = ds["""validation"""].column_names if data_args.image_column_name is not None: _lowerCamelCase : str = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : Optional[Any] = """image""" elif "img" in column_names: _lowerCamelCase : List[Any] = """img""" else: _lowerCamelCase : str = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["""shortest_edge"""] else: _lowerCamelCase : List[Any] = (image_processor.size["""height"""], image_processor.size["""width"""]) _lowerCamelCase : Tuple = Compose( [ Lambda(lambda __snake_case : img.convert("""RGB""" ) if img.mode != "RGB" else img ), RandomResizedCrop(__snake_case , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__snake_case : Optional[Any] ): _lowerCamelCase : Dict = [transforms(__snake_case ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("""--do_train requires a train dataset""" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms 
ds["train"].set_transform(__snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError("""--do_eval requires a validation dataset""" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Union[str, Any] = ( ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__snake_case ) # Compute absolute learning rate _lowerCamelCase : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : Tuple = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Optional[Any] = Trainer( model=__snake_case , args=__snake_case , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__snake_case , data_collator=__snake_case , ) # Training if training_args.do_train: _lowerCamelCase : Any = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : List[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : Optional[Any] = trainer.train(resume_from_checkpoint=__snake_case ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : int = trainer.evaluate() trainer.log_metrics("""eval""" , __snake_case ) trainer.save_metrics("""eval""" , __snake_case ) # Write model card and (optionally) push to hub _lowerCamelCase : Optional[Any] = { """tasks""": """masked-auto-encoding""", """dataset""": data_args.dataset_name, """tags""": ["""masked-auto-encoding"""], } if training_args.push_to_hub: 
trainer.push_to_hub(**__snake_case ) else: trainer.create_model_card(**__snake_case ) def _snake_case ( __snake_case : Dict ): """simple docstring""" main() if __name__ == "__main__": main()
88
1
"""simple docstring""" import torch from transformers import AutoModel class lowercase__ ( torch.nn.Module ): def __init__( self , SCREAMING_SNAKE_CASE="sayef/fsner-bert-base-uncased") -> str: super(SCREAMING_SNAKE_CASE , self).__init__() _lowerCamelCase : Union[str, Any] = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = torch.nn.CosineSimilarity(3 , 1e-0_8) _lowerCamelCase : Optional[int] = torch.nn.Softmax(dim=1) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> str: return self.bert(**SCREAMING_SNAKE_CASE).last_hidden_state def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[Any]: return token_embeddings.sum(2 , keepdim=SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=1) -> Union[str, Any]: return self.softmax(T * self.cos(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> List[str]: _lowerCamelCase : str = W_supports["""sizes"""].tolist() _lowerCamelCase : int = W_supports["""start_token_id"""].item() _lowerCamelCase : str = W_supports["""end_token_id"""].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] _lowerCamelCase : List[str] = self.BERT(**SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = self.BERT(**SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = None _lowerCamelCase : List[Any] = None _lowerCamelCase : Any = W_supports["""input_ids"""] == start_token_id _lowerCamelCase : Any = W_supports["""input_ids"""] == end_token_id for i, size in enumerate(SCREAMING_SNAKE_CASE): if i == 0: _lowerCamelCase : List[str] = 0 else: _lowerCamelCase : Dict = support_sizes[i - 1] _lowerCamelCase : Union[str, Any] = S[s : s + size][start_token_masks[s : s + size]] _lowerCamelCase : Any = S[s : s + size][end_token_masks[s : s + size]] _lowerCamelCase : Any = torch.matmul(q[i] , s_start.T).sum(1).softmax(0) 
_lowerCamelCase : List[str] = torch.matmul(q[i] , s_end.T).sum(1).softmax(0) if p_starts is not None: _lowerCamelCase : str = torch.vstack((p_starts, p_start)) _lowerCamelCase : Optional[Any] = torch.vstack((p_ends, p_end)) else: _lowerCamelCase : Optional[Any] = p_start _lowerCamelCase : int = p_end return p_starts, p_ends
88
"""simple docstring""" import numpy as np def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return 1 / (1 + np.exp(-vector )) def _snake_case ( __snake_case : np.ndarray ): """simple docstring""" return vector * sigmoid(__snake_case ) if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" def _snake_case ( __snake_case : int , __snake_case : Dict , __snake_case : int , __snake_case : int ): """simple docstring""" global f # a global dp table for knapsack if f[i][j] < 0: if j < wt[i - 1]: _lowerCamelCase : Dict = mf_knapsack(i - 1 , __snake_case , __snake_case , __snake_case ) else: _lowerCamelCase : Union[str, Any] = max( mf_knapsack(i - 1 , __snake_case , __snake_case , __snake_case ) , mf_knapsack(i - 1 , __snake_case , __snake_case , j - wt[i - 1] ) + val[i - 1] , ) _lowerCamelCase : List[Any] = val return f[i][j] def _snake_case ( __snake_case : Dict , __snake_case : Tuple , __snake_case : str , __snake_case : List[Any] ): """simple docstring""" _lowerCamelCase : Tuple = [[0] * (w + 1) for _ in range(n + 1 )] for i in range(1 , n + 1 ): for w_ in range(1 , w + 1 ): if wt[i - 1] <= w_: _lowerCamelCase : List[str] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] ) else: _lowerCamelCase : Union[str, Any] = dp[i - 1][w_] return dp[n][w_], dp def _snake_case ( __snake_case : int , __snake_case : list , __snake_case : list ): """simple docstring""" if not (isinstance(__snake_case , (list, tuple) ) and isinstance(__snake_case , (list, tuple) )): raise ValueError( """Both the weights and values vectors must be either lists or tuples""" ) _lowerCamelCase : int = len(__snake_case ) if num_items != len(__snake_case ): _lowerCamelCase : List[Any] = ( """The number of weights must be the same as the number of values.\n""" F'But got {num_items} weights and {len(__snake_case )} values' ) raise ValueError(__snake_case ) for i in range(__snake_case ): if not isinstance(wt[i] , __snake_case ): _lowerCamelCase : List[Any] = ( """All weights must be integers but got weight of """ F'type {type(wt[i] )} at index {i}' ) raise TypeError(__snake_case ) _lowerCamelCase , _lowerCamelCase : int = knapsack(__snake_case , __snake_case , __snake_case , __snake_case ) _lowerCamelCase : set = set() _construct_solution(__snake_case , 
__snake_case , __snake_case , __snake_case , __snake_case ) return optimal_val, example_optional_set def _snake_case ( __snake_case : list , __snake_case : list , __snake_case : int , __snake_case : int , __snake_case : set ): """simple docstring""" if i > 0 and j > 0: if dp[i - 1][j] == dp[i][j]: _construct_solution(__snake_case , __snake_case , i - 1 , __snake_case , __snake_case ) else: optimal_set.add(__snake_case ) _construct_solution(__snake_case , __snake_case , i - 1 , j - wt[i - 1] , __snake_case ) if __name__ == "__main__": UpperCAmelCase = [3, 2, 4, 4] UpperCAmelCase = [4, 3, 2, 3] UpperCAmelCase = 4 UpperCAmelCase = 6 UpperCAmelCase = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)] UpperCAmelCase , UpperCAmelCase = knapsack(w, wt, val, n) print(optimal_solution) print(mf_knapsack(n, wt, val, w)) # switched the n and w # testing the dynamic programming problem with example # the optimal subset for the above example are items 3 and 4 UpperCAmelCase , UpperCAmelCase = knapsack_with_example_solution(w, wt, val) assert optimal_solution == 8 assert optimal_subset == {3, 4} print("""optimal_value = """, optimal_solution) print("""An optimal subset corresponding to the optimal value""", optimal_subset)
88
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def _snake_case ( ): """simple docstring""" _lowerCamelCase : Any = HfArgumentParser(__snake_case ) _lowerCamelCase : int = parser.parse_args_into_dataclasses()[0] _lowerCamelCase : Dict = TensorFlowBenchmark(args=__snake_case ) try: _lowerCamelCase : Optional[int] = parser.parse_args_into_dataclasses()[0] except ValueError as e: _lowerCamelCase : Union[str, Any] = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" _lowerCamelCase : List[str] = """ """.join(str(__snake_case ).split(""" """ )[:-1] ) _lowerCamelCase : Dict = """""" _lowerCamelCase : List[Any] = eval(str(__snake_case ).split(""" """ )[-1] ) _lowerCamelCase : Tuple = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__snake_case ) if len(__snake_case ) > 0: _lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(__snake_case ) raise ValueError(__snake_case ) benchmark.run() if __name__ == "__main__": main()
88
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class lowercase__ ( unittest.TestCase ): def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Dict = tempfile.mkdtemp() _lowerCamelCase : int = BlipImageProcessor() _lowerCamelCase : str = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""") _lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""") _lowerCamelCase : str = InstructBlipProcessor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) processor.save_pretrained(self.tmpdirname) def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> Tuple: return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE).tokenizer def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> Optional[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE).image_processor def UpperCamelCase_ ( self , **SCREAMING_SNAKE_CASE) -> str: return AutoProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE).qformer_tokenizer def UpperCamelCase_ ( self) -> List[Any]: shutil.rmtree(self.tmpdirname) def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Any = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)] _lowerCamelCase : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs] return image_inputs def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Any = InstructBlipProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , ) 
processor.save_pretrained(self.tmpdirname) _lowerCamelCase : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""") _lowerCamelCase : Dict = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0) _lowerCamelCase : Union[str, Any] = InstructBlipProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE) self.assertIsInstance(processor.qformer_tokenizer , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[int] = self.get_image_processor() _lowerCamelCase : List[Any] = self.get_tokenizer() _lowerCamelCase : int = self.get_qformer_tokenizer() _lowerCamelCase : Union[str, Any] = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE , qformer_tokenizer=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""np""") _lowerCamelCase : Any = processor(images=SCREAMING_SNAKE_CASE , return_tensors="""np""") for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : int = self.get_tokenizer() _lowerCamelCase : str = self.get_qformer_tokenizer() _lowerCamelCase : Optional[Any] = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE , qformer_tokenizer=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[str] = 
"""lower newer""" _lowerCamelCase : Dict = processor(text=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = tokenizer(SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : List[Any] = qformer_tokenizer(SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] , encoded_processor[key]) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key]) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = self.get_image_processor() _lowerCamelCase : List[str] = self.get_tokenizer() _lowerCamelCase : Optional[Any] = self.get_qformer_tokenizer() _lowerCamelCase : str = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE , qformer_tokenizer=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = """lower newer""" _lowerCamelCase : Union[str, Any] = self.prepare_image_inputs() _lowerCamelCase : Optional[Any] = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE) self.assertListEqual( list(inputs.keys()) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE): processor() def UpperCamelCase_ ( self) -> Any: _lowerCamelCase : Optional[Any] = self.get_image_processor() _lowerCamelCase : Union[str, Any] = self.get_tokenizer() _lowerCamelCase : Any = self.get_qformer_tokenizer() _lowerCamelCase : List[str] = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE , qformer_tokenizer=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowerCamelCase : str = processor.batch_decode(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE) 
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : List[Any] = self.get_image_processor() _lowerCamelCase : str = self.get_tokenizer() _lowerCamelCase : Optional[int] = self.get_qformer_tokenizer() _lowerCamelCase : int = InstructBlipProcessor( tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE , qformer_tokenizer=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = """lower newer""" _lowerCamelCase : str = self.prepare_image_inputs() _lowerCamelCase : List[str] = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE) self.assertListEqual( list(inputs.keys()) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
88
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""", """kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""", """kssteven/ibert-roberta-large-mnli""": ( """https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json""" ), } class lowercase__ ( A_ ): __UpperCAmelCase = '''ibert''' def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE="none" , **SCREAMING_SNAKE_CASE , ) -> Any: super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = vocab_size _lowerCamelCase : Dict = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : int = num_attention_heads _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : str = intermediate_size _lowerCamelCase : Union[str, Any] = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : Any = max_position_embeddings _lowerCamelCase : Dict = type_vocab_size _lowerCamelCase : List[Any] = initializer_range _lowerCamelCase : Dict = layer_norm_eps _lowerCamelCase : List[Any] = position_embedding_type 
_lowerCamelCase : Any = quant_mode _lowerCamelCase : List[str] = force_dequant class lowercase__ ( A_ ): @property def UpperCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _lowerCamelCase : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""} else: _lowerCamelCase : Optional[int] = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ])
88
1
"""simple docstring""" import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=36 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1000 , ) -> Tuple: _lowerCamelCase : Dict = parent _lowerCamelCase : Union[str, Any] = batch_size _lowerCamelCase : Dict = num_channels _lowerCamelCase : int = image_size _lowerCamelCase : 
Optional[Any] = patch_size _lowerCamelCase : str = text_seq_length _lowerCamelCase : int = is_training _lowerCamelCase : Optional[Any] = use_input_mask _lowerCamelCase : List[str] = use_token_type_ids _lowerCamelCase : Dict = use_labels _lowerCamelCase : Dict = vocab_size _lowerCamelCase : Optional[int] = hidden_size _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Optional[int] = num_attention_heads _lowerCamelCase : List[Any] = intermediate_size _lowerCamelCase : Tuple = hidden_act _lowerCamelCase : int = hidden_dropout_prob _lowerCamelCase : Tuple = attention_probs_dropout_prob _lowerCamelCase : List[str] = max_position_embeddings _lowerCamelCase : Optional[int] = type_vocab_size _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : str = initializer_range _lowerCamelCase : Optional[Any] = coordinate_size _lowerCamelCase : Dict = shape_size _lowerCamelCase : Tuple = num_labels _lowerCamelCase : Union[str, Any] = num_choices _lowerCamelCase : str = scope _lowerCamelCase : int = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) _lowerCamelCase : str = text_seq_length _lowerCamelCase : List[Any] = (image_size // patch_size) ** 2 + 1 _lowerCamelCase : List[Any] = self.text_seq_length + self.image_seq_length def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size) _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: _lowerCamelCase : Optional[int] = bbox[i, j, 3] _lowerCamelCase : Tuple = bbox[i, j, 1] _lowerCamelCase : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCamelCase : Any = bbox[i, j, 2] _lowerCamelCase : str = bbox[i, j, 0] _lowerCamelCase : Dict = t _lowerCamelCase : 
Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _lowerCamelCase : Any = None if self.use_input_mask: _lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.text_seq_length]) _lowerCamelCase : str = None if self.use_token_type_ids: _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size) _lowerCamelCase : Tuple = None _lowerCamelCase : Optional[int] = None if self.use_labels: _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels) _lowerCamelCase : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str: _lowerCamelCase : Tuple = LayoutLMvaModel(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() # text + image _lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = model( SCREAMING_SNAKE_CASE , bbox=SCREAMING_SNAKE_CASE , 
pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : str = model(SCREAMING_SNAKE_CASE , bbox=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE , bbox=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) # text only _lowerCamelCase : Tuple = model(SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size)) # image only _lowerCamelCase : List[str] = model(pixel_values=SCREAMING_SNAKE_CASE) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Dict: _lowerCamelCase : int = self.num_labels _lowerCamelCase : List[str] = LayoutLMvaForSequenceClassification(SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Tuple = model( SCREAMING_SNAKE_CASE , bbox=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = self.num_labels _lowerCamelCase : int = LayoutLMvaForTokenClassification(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : 
Dict = model( SCREAMING_SNAKE_CASE , bbox=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]: _lowerCamelCase : List[str] = LayoutLMvaForQuestionAnswering(config=SCREAMING_SNAKE_CASE) model.to(SCREAMING_SNAKE_CASE) model.eval() _lowerCamelCase : Optional[Any] = model( SCREAMING_SNAKE_CASE , bbox=SCREAMING_SNAKE_CASE , pixel_values=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : Any = config_and_inputs _lowerCamelCase : Optional[int] = { """input_ids""": input_ids, """bbox""": bbox, """pixel_values""": pixel_values, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) 
__UpperCAmelCase = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> int: # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Optional[int] = LayoutLMvaModelTester(self) _lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> Optional[Any]: _lowerCamelCase : List[str] = copy.deepcopy(SCREAMING_SNAKE_CASE) if model_class in get_values(SCREAMING_SNAKE_CASE): _lowerCamelCase : Union[str, Any] = { k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous() if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE): _lowerCamelCase : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE) elif model_class in get_values(SCREAMING_SNAKE_CASE): _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE) elif model_class in [ *get_values(SCREAMING_SNAKE_CASE), ]: _lowerCamelCase : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long 
, device=SCREAMING_SNAKE_CASE) elif model_class in [ *get_values(SCREAMING_SNAKE_CASE), ]: _lowerCamelCase : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE , ) return inputs_dict def UpperCamelCase_ ( self) -> str: self.config_tester.run_common_tests() def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCamelCase : Dict = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> List[str]: _lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> str: _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self) -> Union[str, Any]: _lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> Tuple: for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Any = LayoutLMvaModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) def _snake_case ( ): """simple docstring""" _lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class lowercase__ ( unittest.TestCase ): @cached_property def UpperCamelCase_ ( self) -> int: return LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE) if is_vision_available() else None @slow def 
UpperCamelCase_ ( self) -> str: _lowerCamelCase : str = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""").to(SCREAMING_SNAKE_CASE) _lowerCamelCase : Dict = self.default_image_processor _lowerCamelCase : List[Any] = prepare_img() _lowerCamelCase : List[str] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""").pixel_values.to(SCREAMING_SNAKE_CASE) _lowerCamelCase : int = torch.tensor([[1, 2]]) _lowerCamelCase : Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0) # forward pass _lowerCamelCase : Tuple = model( input_ids=input_ids.to(SCREAMING_SNAKE_CASE) , bbox=bbox.to(SCREAMING_SNAKE_CASE) , pixel_values=pixel_values.to(SCREAMING_SNAKE_CASE) , ) # verify the logits _lowerCamelCase : Union[str, Any] = torch.Size((1, 199, 768)) self.assertEqual(outputs.last_hidden_state.shape , SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[int] = torch.tensor( [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]]).to(SCREAMING_SNAKE_CASE) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4))
88
"""simple docstring""" from __future__ import annotations import queue class lowercase__ : def __init__( self , SCREAMING_SNAKE_CASE) -> int: _lowerCamelCase : int = data _lowerCamelCase : List[str] = None _lowerCamelCase : Any = None def _snake_case ( ): """simple docstring""" print("""\n********Press N to stop entering at any point of time********\n""" ) _lowerCamelCase : Optional[int] = input("""Enter the value of the root node: """ ).strip().lower() _lowerCamelCase : queue.Queue = queue.Queue() _lowerCamelCase : Optional[int] = TreeNode(int(__snake_case ) ) q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Tuple = q.get() _lowerCamelCase : Any = F'Enter the left node of {node_found.data}: ' _lowerCamelCase : Union[str, Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : Dict = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[str] = left_node q.put(__snake_case ) _lowerCamelCase : Optional[int] = F'Enter the right node of {node_found.data}: ' _lowerCamelCase : Optional[Any] = input(__snake_case ).strip().lower() or """n""" if check == "n": return tree_node _lowerCamelCase : List[Any] = TreeNode(int(__snake_case ) ) _lowerCamelCase : List[Any] = right_node q.put(__snake_case ) raise def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return print(node.data , end=""",""" ) pre_order(node.left ) pre_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return in_order(node.left ) print(node.data , end=""",""" ) in_order(node.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return post_order(node.left ) post_order(node.right ) print(node.data , end=""",""" ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not 
isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Any = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: q.put(node_dequeued.left ) if node_dequeued.right: q.put(node_dequeued.right ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : queue.Queue = queue.Queue() q.put(__snake_case ) while not q.empty(): _lowerCamelCase : Optional[Any] = [] while not q.empty(): _lowerCamelCase : Dict = q.get() print(node_dequeued.data , end=""",""" ) if node_dequeued.left: list_.append(node_dequeued.left ) if node_dequeued.right: list_.append(node_dequeued.right ) print() for node in list_: q.put(__snake_case ) def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : Optional[int] = node while n or stack: while n: # start from root node, find its left child print(n.data , end=""",""" ) stack.append(__snake_case ) _lowerCamelCase : Tuple = n.left # end of while means current node doesn't have left child _lowerCamelCase : Optional[Any] = stack.pop() # start to traverse its right child _lowerCamelCase : Dict = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase : list[TreeNode] = [] _lowerCamelCase : int = node while n or stack: while n: stack.append(__snake_case ) _lowerCamelCase : Any = n.left _lowerCamelCase : Optional[Any] = stack.pop() print(n.data , end=""",""" ) _lowerCamelCase : List[Any] = n.right def _snake_case ( __snake_case : TreeNode ): """simple docstring""" if not isinstance(__snake_case , __snake_case ) or not node: return _lowerCamelCase , _lowerCamelCase : Union[str, Any] = [], [] 
_lowerCamelCase : Optional[Any] = node stacka.append(__snake_case ) while stacka: # to find the reversed order of post order, store it in stack2 _lowerCamelCase : Union[str, Any] = stacka.pop() if n.left: stacka.append(n.left ) if n.right: stacka.append(n.right ) stacka.append(__snake_case ) while stacka: # pop up from stack2 will be the post order print(stacka.pop().data , end=""",""" ) def _snake_case ( __snake_case : str = "" , __snake_case : Any=50 , __snake_case : List[str]="*" ): """simple docstring""" if not s: return "\n" + width * char _lowerCamelCase , _lowerCamelCase : Optional[int] = divmod(width - len(__snake_case ) - 2 , 2 ) return F'{left * char} {s} {(left + extra) * char}' if __name__ == "__main__": import doctest doctest.testmod() print(prompt("""Binary Tree Traversals""")) UpperCAmelCase = build_tree() print(prompt("""Pre Order Traversal""")) pre_order(node) print(prompt() + """\n""") print(prompt("""In Order Traversal""")) in_order(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal""")) post_order(node) print(prompt() + """\n""") print(prompt("""Level Order Traversal""")) level_order(node) print(prompt() + """\n""") print(prompt("""Actual Level Order Traversal""")) level_order_actual(node) print("""*""" * 50 + """\n""") print(prompt("""Pre Order Traversal - Iteration Version""")) pre_order_iter(node) print(prompt() + """\n""") print(prompt("""In Order Traversal - Iteration Version""")) in_order_iter(node) print(prompt() + """\n""") print(prompt("""Post Order Traversal - Iteration Version""")) post_order_iter(node) print(prompt())
88
1
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class lowercase__ ( A_ ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = "arrow" , **SCREAMING_SNAKE_CASE , ) -> List[Any]: super().__init__( split=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE , streaming=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : str = load_from_cache_file _lowerCamelCase : Tuple = file_format _lowerCamelCase : List[str] = Spark( df=SCREAMING_SNAKE_CASE , features=SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , working_dir=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[Any]: if self.streaming: return self.builder.as_streaming_dataset(split=self.split) _lowerCamelCase : str = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=SCREAMING_SNAKE_CASE , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split)
88
"""simple docstring""" from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class lowercase__ : __UpperCAmelCase = XGLMConfig __UpperCAmelCase = {} __UpperCAmelCase = '''gelu''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , ) -> List[str]: _lowerCamelCase : Optional[int] = parent _lowerCamelCase : int = batch_size _lowerCamelCase : str = seq_length _lowerCamelCase : Any = is_training _lowerCamelCase : int = use_input_mask _lowerCamelCase : Union[str, Any] = use_labels _lowerCamelCase : str = vocab_size _lowerCamelCase : List[str] = d_model _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : int = ffn_dim _lowerCamelCase : str = activation_function _lowerCamelCase : Optional[int] = activation_dropout _lowerCamelCase : Tuple = attention_dropout _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Dict = initializer_range _lowerCamelCase : Optional[Any] = None _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[Any] = 2 _lowerCamelCase : str = 1 def UpperCamelCase_ ( self) -> 
int: return XGLMConfig.from_pretrained("""facebook/xglm-564M""") def UpperCamelCase_ ( self) -> int: _lowerCamelCase : Union[str, Any] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) , clip_value_min=0 , clip_value_max=3) _lowerCamelCase : str = None if self.use_input_mask: _lowerCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length]) _lowerCamelCase : Tuple = self.get_config() _lowerCamelCase : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2) return ( config, input_ids, input_mask, head_mask, ) def UpperCamelCase_ ( self) -> Optional[int]: return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE , ) def UpperCamelCase_ ( self) -> Optional[int]: _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ) : str = config_and_inputs _lowerCamelCase : Optional[Any] = { """input_ids""": input_ids, """head_mask""": head_mask, } return config, inputs_dict @require_tf class lowercase__ ( A_ ,A_ ,unittest.TestCase ): __UpperCAmelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __UpperCAmelCase = (TFXGLMForCausalLM,) if is_tf_available() else () __UpperCAmelCase = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) __UpperCAmelCase = False __UpperCAmelCase = False __UpperCAmelCase = False 
def UpperCamelCase_ ( self) -> Optional[Any]: _lowerCamelCase : Optional[Any] = TFXGLMModelTester(self) _lowerCamelCase : str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , n_embd=37) def UpperCamelCase_ ( self) -> Dict: self.config_tester.run_common_tests() @slow def UpperCamelCase_ ( self) -> List[Any]: for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Tuple = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE) self.assertIsNotNone(SCREAMING_SNAKE_CASE) @unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""") def UpperCamelCase_ ( self) -> List[Any]: super().test_resize_token_embeddings() @require_tf class lowercase__ ( unittest.TestCase ): @slow def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE=True) -> List[Any]: _lowerCamelCase : List[str] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa) # The dog # </s> The dog is a very friendly dog. 
He is very affectionate and loves to play with other # fmt: off _lowerCamelCase : Dict = [2, 268, 9865, 67, 11, 1988, 5_7252, 9865, 5, 984, 67, 1988, 21_3838, 1658, 53, 7_0446, 33, 6657, 278, 1581] # fmt: on _lowerCamelCase : str = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , num_beams=1) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> int: _lowerCamelCase : int = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") tf.random.set_seed(0) _lowerCamelCase : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""") _lowerCamelCase : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(""":/CPU:0"""): _lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE , do_sample=SCREAMING_SNAKE_CASE , seed=[7, 0]) _lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = ( """Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due""" ) self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) @slow def UpperCamelCase_ ( self) -> List[Any]: _lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""") _lowerCamelCase : List[Any] = """left""" # use different length sentences to test batching _lowerCamelCase : List[Any] = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. 
When""", """Hello, my dog is a little""", ] _lowerCamelCase : Union[str, Any] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="""tf""" , padding=SCREAMING_SNAKE_CASE) _lowerCamelCase : int = inputs["""input_ids"""] _lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12) _lowerCamelCase : List[str] = tokenizer(sentences[0] , return_tensors="""tf""").input_ids _lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Tuple = tokenizer(sentences[1] , return_tensors="""tf""").input_ids _lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE , max_new_tokens=12) _lowerCamelCase : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Optional[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE) _lowerCamelCase : Tuple = [ """This is an extremelly long sentence that only exists to test the ability of the model to cope with """ """left-padding, such as in batched generation. The output for the sequence below should be the same """ """regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """ """a single""", """Hello, my dog is a little bit of a shy one, but he is very friendly""", ] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) self.assertListEqual(SCREAMING_SNAKE_CASE , [non_padded_sentence, padded_sentence])
88
1
"""simple docstring""" import argparse import copy def _snake_case ( __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : str = {} with open(__snake_case ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: _lowerCamelCase : Optional[Any] = [] _list.append([line.split()[1], line.split()[2]] ) _lowerCamelCase : int = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: _lowerCamelCase : str = [] _list.append([line.split()[0], line.split()[2]] ) _lowerCamelCase : Any = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _snake_case ( __snake_case : List[Any] , __snake_case : int ): """simple docstring""" with open(__snake_case ) as f: _lowerCamelCase : str = f.read(1 ) _lowerCamelCase : Dict = start_node _lowerCamelCase : Tuple = [] _lowerCamelCase : int = start_node _lowerCamelCase : Tuple = 0 while visiting not in first_solution: _lowerCamelCase : int = 10000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(__snake_case ) and k[0] not in first_solution: _lowerCamelCase : List[Any] = k[1] _lowerCamelCase : Any = k[0] first_solution.append(__snake_case ) _lowerCamelCase : Optional[int] = distance_of_first_solution + int(__snake_case ) _lowerCamelCase : Any = best_node first_solution.append(__snake_case ) _lowerCamelCase : List[Any] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 _lowerCamelCase : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 10000 ) return first_solution, distance_of_first_solution def _snake_case ( __snake_case : List[Any] , __snake_case : Optional[Any] ): """simple docstring""" _lowerCamelCase : str = [] for n in solution[1:-1]: _lowerCamelCase : Tuple = solution.index(__snake_case ) for kn in solution[1:-1]: _lowerCamelCase : Union[str, Any] = 
solution.index(__snake_case ) if n == kn: continue _lowerCamelCase : Optional[int] = copy.deepcopy(__snake_case ) _lowerCamelCase : Any = kn _lowerCamelCase : str = n _lowerCamelCase : List[str] = 0 for k in _tmp[:-1]: _lowerCamelCase : List[Any] = _tmp[_tmp.index(__snake_case ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: _lowerCamelCase : str = distance + int(i[1] ) _tmp.append(__snake_case ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) _lowerCamelCase : List[Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda __snake_case : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _snake_case ( __snake_case : List[str] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Dict , __snake_case : List[Any] ): """simple docstring""" _lowerCamelCase : Any = 1 _lowerCamelCase : str = first_solution _lowerCamelCase : int = [] _lowerCamelCase : str = distance_of_first_solution _lowerCamelCase : int = solution while count <= iters: _lowerCamelCase : Optional[Any] = find_neighborhood(__snake_case , __snake_case ) _lowerCamelCase : Tuple = 0 _lowerCamelCase : List[Any] = neighborhood[index_of_best_solution] _lowerCamelCase : List[str] = len(__snake_case ) - 1 _lowerCamelCase : Optional[int] = False while not found: _lowerCamelCase : str = 0 while i < len(__snake_case ): if best_solution[i] != solution[i]: _lowerCamelCase : Optional[Any] = best_solution[i] _lowerCamelCase : List[str] = solution[i] break _lowerCamelCase : int = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) _lowerCamelCase : Optional[int] = True _lowerCamelCase : Any = best_solution[:-1] _lowerCamelCase : Dict = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: _lowerCamelCase : Tuple = cost _lowerCamelCase : 
Optional[Any] = solution else: _lowerCamelCase : Dict = index_of_best_solution + 1 _lowerCamelCase : Union[str, Any] = neighborhood[index_of_best_solution] if len(__snake_case ) >= size: tabu_list.pop(0 ) _lowerCamelCase : Any = count + 1 return best_solution_ever, best_cost def _snake_case ( __snake_case : Any=None ): """simple docstring""" _lowerCamelCase : Dict = generate_neighbours(args.File ) _lowerCamelCase , _lowerCamelCase : Any = generate_first_solution( args.File , __snake_case ) _lowerCamelCase , _lowerCamelCase : List[str] = tabu_search( __snake_case , __snake_case , __snake_case , args.Iterations , args.Size , ) print(F'Best solution: {best_sol}, with total distance: {best_cost}.' ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser(description="""Tabu Search""") parser.add_argument( """-f""", """--File""", type=str, help="""Path to the file containing the data""", required=True, ) parser.add_argument( """-i""", """--Iterations""", type=int, help="""How many iterations the algorithm should perform""", required=True, ) parser.add_argument( """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True ) # Pass the arguments to main method main(parser.parse_args())
88
"""simple docstring""" from collections import defaultdict def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : Tuple = first_str.lower().strip() _lowerCamelCase : int = second_str.lower().strip() # Remove whitespace _lowerCamelCase : Any = first_str.replace(""" """ , """""" ) _lowerCamelCase : List[str] = second_str.replace(""" """ , """""" ) # Strings of different lengths are not anagrams if len(__snake_case ) != len(__snake_case ): return False # Default values for count should be 0 _lowerCamelCase : defaultdict[str, int] = defaultdict(__snake_case ) # For each character in input strings, # increment count in the corresponding for i in range(len(__snake_case ) ): count[first_str[i]] += 1 count[second_str[i]] -= 1 return all(_count == 0 for _count in count.values() ) if __name__ == "__main__": from doctest import testmod testmod() UpperCAmelCase = input("""Enter the first string """).strip() UpperCAmelCase = input("""Enter the second string """).strip() UpperCAmelCase = check_anagrams(input_a, input_b) print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
88
1
"""simple docstring""" import functools def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : int = len(__snake_case ) _lowerCamelCase : List[Any] = len(__snake_case ) @functools.cache def min_distance(__snake_case : int , __snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa _lowerCamelCase : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
88
"""simple docstring""" from __future__ import annotations from numpy import array, cos, cross, floataa, radians, sin from numpy.typing import NDArray def _snake_case ( __snake_case : float , __snake_case : float , __snake_case : bool = False ): """simple docstring""" if radian_mode: return [magnitude * cos(__snake_case ), magnitude * sin(__snake_case )] return [magnitude * cos(radians(__snake_case ) ), magnitude * sin(radians(__snake_case ) )] def _snake_case ( __snake_case : NDArray[floataa] , __snake_case : NDArray[floataa] , __snake_case : float = 10**-1 ): """simple docstring""" _lowerCamelCase : NDArray[floataa] = cross(__snake_case , __snake_case ) _lowerCamelCase : float = sum(__snake_case ) return abs(__snake_case ) < eps if __name__ == "__main__": # Test to check if it works UpperCAmelCase = array( [ polar_force(718.4, 180 - 30), polar_force(879.54, 45), polar_force(100, -90), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem 1 in image_data/2D_problems.jpg UpperCAmelCase = array( [ polar_force(30 * 9.81, 15), polar_force(215, 180 - 45), polar_force(264, 90 - 30), ] ) UpperCAmelCase = array([[0, 0], [0, 0], [0, 0]]) assert in_static_equilibrium(forces, location) # Problem in image_data/2D_problems_1.jpg UpperCAmelCase = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]]) UpperCAmelCase = array([[0, 0], [6, 0], [10, 0], [12, 0]]) assert in_static_equilibrium(forces, location) import doctest doctest.testmod()
88
1
"""simple docstring""" UpperCAmelCase = [ """Audio""", """Array2D""", """Array3D""", """Array4D""", """Array5D""", """ClassLabel""", """Features""", """Sequence""", """Value""", """Image""", """Translation""", """TranslationVariableLanguages""", ] from .audio import Audio from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value from .image import Image from .translation import Translation, TranslationVariableLanguages
88
"""simple docstring""" import random def _snake_case ( __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = a[left_index] _lowerCamelCase : Dict = left_index + 1 for j in range(left_index + 1 , __snake_case ): if a[j] < pivot: _lowerCamelCase , _lowerCamelCase : List[str] = a[i], a[j] i += 1 _lowerCamelCase , _lowerCamelCase : Optional[int] = a[i - 1], a[left_index] return i - 1 def _snake_case ( __snake_case : Tuple , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" if left < right: _lowerCamelCase : Any = random.randint(__snake_case , right - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = ( a[left], a[pivot], ) # switches the pivot with the left most bound _lowerCamelCase : List[str] = partition(__snake_case , __snake_case , __snake_case ) quick_sort_random( __snake_case , __snake_case , __snake_case ) # recursive quicksort to the left of the pivot point quick_sort_random( __snake_case , pivot_index + 1 , __snake_case ) # recursive quicksort to the right of the pivot point def _snake_case ( ): """simple docstring""" _lowerCamelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""" ).strip() _lowerCamelCase : int = [int(__snake_case ) for item in user_input.split(""",""" )] quick_sort_random(__snake_case , 0 , len(__snake_case ) ) print(__snake_case ) if __name__ == "__main__": main()
88
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roformer import RoFormerTokenizer from .tokenization_utils import JiebaPreTokenizer UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} UpperCAmelCase = { """vocab_file""": { """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""", """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""", """junnyu/roformer_chinese_char_small""": ( """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt""" ), """junnyu/roformer_chinese_char_base""": ( """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt""" ), """junnyu/roformer_small_discriminator""": ( """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt""" ), """junnyu/roformer_small_generator""": ( """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt""" ), } } UpperCAmelCase = { """junnyu/roformer_chinese_small""": 1536, """junnyu/roformer_chinese_base""": 1536, """junnyu/roformer_chinese_char_small""": 512, """junnyu/roformer_chinese_char_base""": 512, """junnyu/roformer_small_discriminator""": 128, """junnyu/roformer_small_generator""": 128, } UpperCAmelCase = { """junnyu/roformer_chinese_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_base""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True}, """junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True}, """junnyu/roformer_small_discriminator""": {"""do_lower_case""": True}, 
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True}, } class lowercase__ ( A_ ): __UpperCAmelCase = VOCAB_FILES_NAMES __UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase = PRETRAINED_INIT_CONFIGURATION __UpperCAmelCase = RoFormerTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="[UNK]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[PAD]" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( pre_tok_state.get("""lowercase""" , SCREAMING_SNAKE_CASE) != do_lower_case or pre_tok_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE) != strip_accents ): _lowerCamelCase : str = getattr(SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""")) _lowerCamelCase : List[str] = do_lower_case _lowerCamelCase : str = strip_accents _lowerCamelCase : Optional[Any] = pre_tok_class(**SCREAMING_SNAKE_CASE) _lowerCamelCase : Any = do_lower_case def __getstate__( self) -> str: _lowerCamelCase : Any = self.__dict__.copy() _lowerCamelCase : Dict = BertPreTokenizer() return state def __setstate__( self , SCREAMING_SNAKE_CASE) -> Union[str, Any]: _lowerCamelCase : Tuple = d _lowerCamelCase : List[Any] = self.__dict__["""_tokenizer"""].get_vocab() _lowerCamelCase : Optional[int] = 
PreTokenizer.custom(JiebaPreTokenizer(SCREAMING_SNAKE_CASE)) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None) -> Any: _lowerCamelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]: _lowerCamelCase : List[Any] = [self.sep_token_id] _lowerCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]: _lowerCamelCase : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE) return tuple(SCREAMING_SNAKE_CASE) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: _lowerCamelCase : List[Any] = BertPreTokenizer() return super().save_pretrained(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness UpperCAmelCase = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ UpperCAmelCase = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ UpperCAmelCase = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. 
references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ UpperCAmelCase = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ UpperCAmelCase = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> str: return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=[1, 10, 100] , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=3.0) -> Union[str, Any]: if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=SCREAMING_SNAKE_CASE) as executor: _lowerCamelCase : Optional[int] = [] _lowerCamelCase : Optional[int] = Counter() _lowerCamelCase : Any = 0 _lowerCamelCase : List[Any] = defaultdict(SCREAMING_SNAKE_CASE) for task_id, (candidates, test_case) in enumerate(zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)): for candidate in candidates: _lowerCamelCase : Any = candidate + """\n""" + test_case _lowerCamelCase : Union[str, Any] = (test_program, timeout, task_id, completion_id[task_id]) _lowerCamelCase : List[str] = executor.submit(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE) futures.append(SCREAMING_SNAKE_CASE) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(SCREAMING_SNAKE_CASE): _lowerCamelCase : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _lowerCamelCase , _lowerCamelCase : List[Any] = [], [] for result in results.values(): result.sort() _lowerCamelCase : List[str] = [r[1]["""passed"""] for r in result] total.append(len(SCREAMING_SNAKE_CASE)) correct.append(sum(SCREAMING_SNAKE_CASE)) _lowerCamelCase : List[Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, 
Any] = np.array(SCREAMING_SNAKE_CASE) _lowerCamelCase : Union[str, Any] = k _lowerCamelCase : Optional[Any] = {F'pass@{k}': estimate_pass_at_k(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE).mean() for k in ks if (total >= k).all()} return pass_at_k, results def _snake_case ( __snake_case : List[str] , __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" def estimator(__snake_case : int , __snake_case : int , __snake_case : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__snake_case , __snake_case ): _lowerCamelCase : Optional[int] = itertools.repeat(__snake_case , len(__snake_case ) ) else: assert len(__snake_case ) == len(__snake_case ) _lowerCamelCase : List[str] = iter(__snake_case ) return np.array([estimator(int(__snake_case ) , int(__snake_case ) , __snake_case ) for n, c in zip(__snake_case , __snake_case )] )
88
1
"""simple docstring""" import random from typing import Any def _snake_case ( __snake_case : list ): """simple docstring""" for _ in range(len(__snake_case ) ): _lowerCamelCase : Optional[Any] = random.randint(0 , len(__snake_case ) - 1 ) _lowerCamelCase : Union[str, Any] = random.randint(0 , len(__snake_case ) - 1 ) _lowerCamelCase , _lowerCamelCase : Optional[Any] = data[b], data[a] return data if __name__ == "__main__": UpperCAmelCase = [0, 1, 2, 3, 4, 5, 6, 7] UpperCAmelCase = ["""python""", """says""", """hello""", """!"""] print("""Fisher-Yates Shuffle:""") print("""List""", integers, strings) print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
88
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
1
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase = """\ @misc{wu2016googles, title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } """ UpperCAmelCase = """\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the 'GLEU score'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score's range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. 
""" UpperCAmelCase = """\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: 'google_bleu': google_bleu score Examples: Example 1: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.44 Example 2: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 
'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results[\"google_bleu\"], 2)) 0.61 Example 3: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results[\"google_bleu\"], 2)) 0.53 Example 4: >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always', ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat'] >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which', ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never', ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat'] >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never', ... 'heed', 'the', 'cat', 'commands'] >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the', ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions', ... 'of', 'the', 'cat'] >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was', ... 'interested', 'in', 'world', 'history'] >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history', ... 
'because', 'he', 'read', 'the', 'book'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric(\"google_bleu\") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results[\"google_bleu\"], 2)) 0.4 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class lowercase__ ( datasets.Metric ): def UpperCamelCase_ ( self) -> MetricInfo: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence"""), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""") , id="""sequence""") , id="""references"""), }) , ) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = 4 , ) -> Dict[str, float]: return { "google_bleu": gleu_score.corpus_gleu( list_of_references=SCREAMING_SNAKE_CASE , hypotheses=SCREAMING_SNAKE_CASE , min_len=SCREAMING_SNAKE_CASE , max_len=SCREAMING_SNAKE_CASE) }
88
"""simple docstring""" def _snake_case ( __snake_case : str , __snake_case : str ): """simple docstring""" _lowerCamelCase : str = len(__snake_case ) _lowerCamelCase : Union[str, Any] = len(__snake_case ) _lowerCamelCase : int = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _lowerCamelCase : Union[str, Any] = True for i in range(__snake_case ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _lowerCamelCase : Tuple = True if a[i].islower(): _lowerCamelCase : Tuple = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
88
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""", """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""", } class lowercase__ ( A_ ): __UpperCAmelCase = '''markuplm''' def __init__( self , SCREAMING_SNAKE_CASE=3_0522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-1_2 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=216 , SCREAMING_SNAKE_CASE=1001 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> int: super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) _lowerCamelCase : str = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : List[Any] = num_hidden_layers _lowerCamelCase : Any = num_attention_heads _lowerCamelCase : Union[str, Any] = hidden_act _lowerCamelCase : Optional[int] = intermediate_size _lowerCamelCase : Optional[int] = hidden_dropout_prob _lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob _lowerCamelCase : List[Any] = max_position_embeddings _lowerCamelCase : Optional[int] = type_vocab_size _lowerCamelCase : int = initializer_range _lowerCamelCase : Tuple = layer_norm_eps _lowerCamelCase : str = position_embedding_type _lowerCamelCase : List[Any] = use_cache 
_lowerCamelCase : Tuple = classifier_dropout # additional properties _lowerCamelCase : Tuple = max_depth _lowerCamelCase : List[Any] = max_xpath_tag_unit_embeddings _lowerCamelCase : Optional[int] = max_xpath_subs_unit_embeddings _lowerCamelCase : Tuple = tag_pad_id _lowerCamelCase : int = subs_pad_id _lowerCamelCase : Optional[int] = xpath_unit_hidden_size
88
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor UpperCAmelCase = logging.get_logger(__name__) class lowercase__ ( A_ ): def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE) -> None: warnings.warn( """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ImageGPTImageProcessor instead.""" , SCREAMING_SNAKE_CASE , ) super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE)
88
1
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--user""", type=str, default="""ubuntu""") parser.add_argument("""--host""", type=str, default="""localhost""") parser.add_argument("""--key_path""", type=str, default=None) parser.add_argument("""--instance""", type=str, default="""V100:1""") parser.add_argument("""--provider""", type=str, default="""cheapest""") parser.add_argument("""--use_spot""", type=bool, default=False) parser.add_argument("""--example""", type=str, default="""pytorch/text-generation/run_generation.py""") UpperCAmelCase , UpperCAmelCase = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("""Cannot specify both BYO and on-demand cluster args""") UpperCAmelCase = rh.cluster( name="""rh-cluster""", ips=[args.host], ssh_creds={"""ssh_user""": args.user, """ssh_private_key""": args.key_path} ) else: UpperCAmelCase = rh.cluster( name="""rh-cluster""", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) UpperCAmelCase = args.example.rsplit("""/""", 1)[0] # Set up remote environment cluster.install_packages(["""pip:./"""]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r 
transformers/examples/{example_dir}/requirements.txt''']) cluster.run(["""pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"""]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
88
"""simple docstring""" from math import isqrt, loga def _snake_case ( __snake_case : int ): """simple docstring""" _lowerCamelCase : List[str] = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , __snake_case , __snake_case ): _lowerCamelCase : Optional[int] = False return [i for i in range(2 , __snake_case ) if is_prime[i]] def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ): """simple docstring""" _lowerCamelCase : Union[str, Any] = degree * loga(__snake_case ) _lowerCamelCase : Union[str, Any] = int(__snake_case ) _lowerCamelCase : Dict = calculate_prime_numbers(__snake_case ) _lowerCamelCase : Optional[int] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : Any = len(__snake_case ) - 1 while left < right: while ( prime_numbers[right] * loga(prime_numbers[left] ) + prime_numbers[left] * loga(prime_numbers[right] ) > upper_bound ): right -= 1 hybrid_integers_count += right - left left += 1 return hybrid_integers_count if __name__ == "__main__": print(f'''{solution() = }''')
88
1